source
stringlengths
3
92
c
stringlengths
26
2.25M
GB_binop__lt_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lt_int32) // A.*B function (eWiseMult): GB (_AemultB_08__lt_int32) // A.*B function (eWiseMult): GB (_AemultB_02__lt_int32) // A.*B function (eWiseMult): GB (_AemultB_04__lt_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_int32) // A*D function (colscale): GB (_AxD__lt_int32) // D*A function (rowscale): GB (_DxB__lt_int32) // C+=B function (dense accum): GB (_Cdense_accumB__lt_int32) // C+=b function (dense accum): GB (_Cdense_accumb__lt_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_int32) // C=scalar+B GB (_bind1st__lt_int32) // C=scalar+B' GB (_bind1st_tran__lt_int32) // C=A+scalar GB (_bind2nd__lt_int32) // C=A'+scalar GB (_bind2nd_tran__lt_int32) // C type: bool // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 
// aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x < y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LT || GxB_NO_INT32 || GxB_NO_LT_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__lt_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lt_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lt_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lt_int32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, 
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__lt_int32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__lt_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__lt_int32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const 
GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__lt_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__lt_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__lt_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__lt_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 
; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = GBX (Bx, p, false) ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lt_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB (_bind1st_tran__lt_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB (_bind2nd_tran__lt_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
mscash2_fmt_plug.c
/* MSCASH2 patch for John the Ripper written by S3nf in 2010, 2011 * a slow but working version * * Cracking Domain Cached Credentials for modern Windows operating systems, supporting: * - Windows Vista * - Windows 7 * - Windows Server 2008 * * This software was written by S3nf in 2010, 2011. No copyright is claimed, and the software is hereby placed in * the public domain. In case this attempt to disclaim copyright and place the software in the public domain * is deemed null and void, then the software is Copyright (c) 2010, 2011 S3nf and it is hereby released to the * general public under the following terms: * * Redistribution and use in source and binary forms, with or without modification, are permitted. * * Modified for optional utf-8 support by magnum 2011, same terms as above * * Code redone/optimized by JimF June 2011. (2x to 10x improvement in speed) * - Code converted to oSSL (for non-sse builds). The inline MD4/SHA1 replaced. This reduced * about 900 lines down to 60 or so, which were much easier to follow. This was a preliminary * step to getting SSE2 added. Once done, this ended up faster than the original, so the new * simplified code was kept. * - Setup of ipad/opad only done once per PW/Salt about 10-15% speedup * - 1/2 of the encryption performed within inner loop was moved outside of inner loop (nearly doubles speed) * - changed signature from M$salt#hash to $DCC2$iterations#salt#hash * - variable iterations now 'possible'. Default is 10240 * - increased salt (user name) upto 22 UC2 characters. Bug in original code only allowed up to 8 chars. * - Added SSE2(/MMX) and SSE2i to the deep inner loop. 2x to 4x speedup. * - total about 2x to 10x improvment in speed (depending upon CPU and compiler). Some compilers * were more efficient with original code, and thus received less of a performance boost. Others * got a signicant improvment. * - The utf8 code was greatly simplified. 
There was no reason to try to optimized the UTF code as * the format is so slow that utf8 conversion is a non-issue. Thus we always call the enc_to_utf16() * at the proper locations, and let that function deal with being in --encoding=utf8 switch mode or not. * - Fixed code to properly work with BE systems, and alignment required systems. * - Made some 'interface' changes to the SSE2i for SHA1, and to the sha-mmx.S code, to make it work * properly, and to make it more efficient. We deal with 2 SHA1 states, and alternate back and forth * between them. The changes to the SSE2i code, were to optimize this dual state, and the changes * to the .S code were simply to make it work at all and the same optimizations were placed there. * - the OMP code was removed during initial re-write, and was properly re-incorporated by magnum. * * In June 2013, salt length (Username) increased from 22 to 128, and max password length increased * from 27 to 125 bytes (unicode bytes, so 250 ?) * * This module is based on: * - the MSCASH patch for john written by Alain Espinosa <alainesp at gmail.com> in 2007 * - RFC 1320 - The MD4 Message-Digest Algorithm * - RFC 2104 - HMAC: Keyed-Hashing for Message Authentication * - RFC 3174 - US Secure Hash Algorithm 1 (SHA1) * - the HMAC-SHA1 implementation of the PolarSSL open source cryptographic library (http://polarssl.org/) */ #if FMT_EXTERNS_H extern struct fmt_main fmt_mscash2; #elif FMT_REGISTERS_H john_register_one(&fmt_mscash2); #else #include <string.h> #include "arch.h" #include "misc.h" #include "memory.h" #include "common.h" #include "formats.h" #include "unicode.h" #include "options.h" #include "unicode.h" #include "sha.h" #include "md4.h" #include "simd-intrinsics.h" #include "loader.h" #include "mscash_common.h" #if defined (_OPENMP) #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 8 // Tuned on Corei7 Quad-HT #endif #endif #include "memdbg.h" #define ITERATIONS 10240 static unsigned iteration_cnt = (ITERATIONS); /* this will get 
changed at runtime, salt loading */ #define FORMAT_LABEL "mscash2" #define FORMAT_NAME "MS Cache Hash 2 (DCC2)" #define MAX_SALT_LEN 128 #define PLAINTEXT_LENGTH 125 #define SALT_SIZE (MAX_SALT_LEN*2+4) #define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME #ifdef SIMD_COEF_32 #define MS_NUM_KEYS (SIMD_COEF_32*SIMD_PARA_SHA1) // Ok, now we have our MMX/SSE2/intr buffer. // this version works properly for MMX, SSE2 (.S) and SSE2 intrinsic. #define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3) )*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4 ) //for endianity conversion static unsigned char (*sse_hash1); static unsigned char (*sse_crypt1); static unsigned char (*sse_crypt2); #else # define MS_NUM_KEYS 1 #endif #define MIN_KEYS_PER_CRYPT MS_NUM_KEYS #define MAX_KEYS_PER_CRYPT MS_NUM_KEYS #define U16_KEY_LEN (2*PLAINTEXT_LENGTH) #define HASH_LEN (16+48) static unsigned char *salt_buffer; static unsigned int salt_len; static unsigned char(*key); static unsigned int new_key = 1; static unsigned char(*md4hash); // allows the md4 of user, and salt to be appended to it. 
the md4 is ntlm, with the salt is DCC1 static unsigned int (*crypt_out); static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_max_threads(); if (omp_t < 1) omp_t = 1; self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif key = mem_calloc(self->params.max_keys_per_crypt, (PLAINTEXT_LENGTH + 1)); md4hash = mem_calloc(self->params.max_keys_per_crypt, HASH_LEN); crypt_out = mem_calloc(self->params.max_keys_per_crypt, BINARY_SIZE); #if defined (SIMD_COEF_32) sse_hash1 = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*sse_hash1)*SHA_BUF_SIZ*4, MEM_ALIGN_SIMD); sse_crypt1 = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*sse_crypt1) * 20, MEM_ALIGN_SIMD); sse_crypt2 = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*sse_crypt2) * 20, MEM_ALIGN_SIMD); { int index; for (index = 0; index < self->params.max_keys_per_crypt; ++index) { // set the length of all hash1 SSE buffer to 64+20 * 8 bits // The 64 is for the ipad/opad, the 20 is for the length of the SHA1 buffer that also gets into each crypt // this works for SSEi ((unsigned int *)sse_hash1)[15*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] = (84<<3); // all encrypts are 64+20 bytes. sse_hash1[GETPOS(20,index)] = 0x80; } } // From this point on, we ONLY touch the first 20 bytes (* SIMD_COEF_32) of each buffer 'block'. If !SHA_PARA', then only the first // block is written to after this, if there are more that one SHA_PARA, then the start of each para block will be updated inside the inner loop. 
#endif mscash2_adjust_tests(options.target_enc, PLAINTEXT_LENGTH, MAX_SALT_LEN); } static void done(void) { #ifdef SIMD_COEF_32 MEM_FREE(sse_crypt2); MEM_FREE(sse_crypt1); MEM_FREE(sse_hash1); #endif MEM_FREE(crypt_out); MEM_FREE(md4hash); MEM_FREE(key); } static int valid(char *ciphertext, struct fmt_main *self) { return mscash2_common_valid(ciphertext, MAX_SALT_LEN, self); } static void set_salt(void *salt) { UTF16 *p = (UTF16*)salt; salt_len = *p++; iteration_cnt = *p++; salt_buffer = (unsigned char*)p; } static void *get_salt(char *_ciphertext) { unsigned char *ciphertext = (unsigned char *)_ciphertext; static UTF16 out[130+1]; unsigned char input[MAX_SALT_LEN*3+1]; int iterations, utf16len, md4_size; memset(out, 0, sizeof(out)); ciphertext += FORMAT_TAG2_LEN; while (*ciphertext && *ciphertext != '#') ++ciphertext; ++ciphertext; for (md4_size=0;md4_size<sizeof(input)-1;md4_size++) { if (ciphertext[md4_size] == '#') break; input[md4_size] = ciphertext[md4_size]; } input[md4_size] = 0; utf16len = enc_to_utf16(&out[2], MAX_SALT_LEN, input, md4_size); if (utf16len < 0) utf16len = strlen16(&out[2]); out[0] = utf16len << 1; sscanf(&_ciphertext[6], "%d", &iterations); out[1] = iterations; return out; } static void *get_binary(char *ciphertext) { static unsigned int out[BINARY_SIZE / sizeof(unsigned int)]; unsigned int i = 0; unsigned int temp; for (; ciphertext[0] != '#'; ciphertext++); ciphertext++; for (; ciphertext[0] != '#'; ciphertext++); ciphertext++; for (; i < 4 ;i++) { #if ARCH_LITTLE_ENDIAN temp = ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 0])])) << 4; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 1])])); temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 2])])) << 12; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 3])])) << 8; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 4])])) << 20; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 5])])) << 16; temp |= ((unsigned 
int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 6])])) << 28; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 7])])) << 24; #else temp = ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 6])])) << 4; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 7])])); temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 4])])) << 12; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 5])])) << 8; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 2])])) << 20; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 3])])) << 16; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 0])])) << 28; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 1])])) << 24; #endif out[i] = temp; } #ifdef SIMD_COEF_32 alter_endianity(out, BINARY_SIZE); #endif return out; } static int binary_hash_0(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_0; } static int binary_hash_1(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_1; } static int binary_hash_2(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_2; } static int binary_hash_3(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_3; } static int binary_hash_4(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_4; } static int binary_hash_5(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_5; } static int binary_hash_6(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_6; } static int get_hash_0(int index) { return crypt_out[4 * index + 3] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[4 * index + 3] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[4 * index + 3] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[4 * index + 3] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[4 * index + 3] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[4 * index + 3] & PH_MASK_5; } static int get_hash_6(int index) { return 
crypt_out[4 * index + 3] & PH_MASK_6; } static int cmp_all(void *binary, int count) { unsigned int i = 0; unsigned int d = ((unsigned int *)binary)[3]; for (; i < count; i++) if (d == crypt_out[i * 4 + 3]) return 1; return 0; } static int cmp_one(void * binary, int index) { unsigned int *t = (unsigned int *)binary; unsigned int a = crypt_out[4 * index + 0]; unsigned int b = crypt_out[4 * index + 1]; unsigned int c = crypt_out[4 * index + 2]; unsigned int d = crypt_out[4 * index + 3]; if (d != t[3]) return 0; if (c != t[2]) return 0; if (b != t[1]) return 0; return (a == t[0]); } static int cmp_exact(char *source, int index) { return 1; } static void set_key(char *_key, int index) { strnzcpy ((char*)&key[index*(PLAINTEXT_LENGTH + 1)], _key, (PLAINTEXT_LENGTH + 1)); new_key = 1; } static char *get_key(int index) { return (char*)&key[index*(PLAINTEXT_LENGTH + 1)]; } // Public domain hash function by DJ Bernstein (salt is a username) static int salt_hash(void *salt) { UTF16 *n = salt, i; unsigned char *s = (unsigned char*)n; unsigned int hash = 5381; for (i = 0; i < (*n+2); ++i) hash = ((hash<<5)+hash) ^ s[i]; return hash & (SALT_HASH_SIZE - 1); } #ifdef SIMD_COEF_32 // NOTE, in the end, this block will move above the pbkdf2() function, and the #else and #endif wrapping that function will be // uncommented. Thus, if built for SSE2 (mmx, or intrisic), we get this function. Otherwise we get the pbkdf2() function which // uses OpenSSL. However to get the 'layout' right, The code here will walk through the array buffer, calling the pbkdf2 // function. static void pbkdf2_sse2(int t) { // Thread safe, t is our thread number. 
// All indexes into buffers are offset by (t * MS_NUM_KEYS * (size)) SHA_CTX ctx1, ctx2; unsigned int ipad[SHA_LBLOCK], opad[SHA_LBLOCK]; unsigned int tmp_hash[SHA_DIGEST_LENGTH/4]; unsigned int i, j, k, *i1, *i2, *o1, *t_crypt; unsigned char *t_sse_crypt1, *t_sse_crypt2, *t_sse_hash1; memset(&ipad[4], 0x36, SHA_CBLOCK-16); memset(&opad[4], 0x5C, SHA_CBLOCK-16); // All pointers get their offset for this thread here. No further offsetting below. t_crypt = &crypt_out[t * MS_NUM_KEYS * 4]; t_sse_crypt1 = &sse_crypt1[t * MS_NUM_KEYS * 20]; t_sse_crypt2 = &sse_crypt2[t * MS_NUM_KEYS * 20]; t_sse_hash1 = &sse_hash1[t * MS_NUM_KEYS * SHA_BUF_SIZ * 4]; i1 = (unsigned int*)t_sse_crypt1; i2 = (unsigned int*)t_sse_crypt2; o1 = (unsigned int*)t_sse_hash1; for(k = 0; k < MS_NUM_KEYS; ++k) { for(i = 0;i < 4;i++) { ipad[i] = t_crypt[k*4+i]^0x36363636; opad[i] = t_crypt[k*4+i]^0x5C5C5C5C; } SHA1_Init(&ctx1); SHA1_Init(&ctx2); SHA1_Update(&ctx1,ipad,SHA_CBLOCK); SHA1_Update(&ctx2,opad,SHA_CBLOCK); // we memcopy from flat into SIMD_COEF_32 output buffer's (our 'temp' ctx buffer). // This data will NOT need to be BE swapped (it already IS BE swapped). 
i1[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))] = ctx1.h0; i1[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))+SIMD_COEF_32] = ctx1.h1; i1[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))+(SIMD_COEF_32<<1)] = ctx1.h2; i1[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))+SIMD_COEF_32*3] = ctx1.h3; i1[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))+(SIMD_COEF_32<<2)] = ctx1.h4; i2[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))] = ctx2.h0; i2[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))+SIMD_COEF_32] = ctx2.h1; i2[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))+(SIMD_COEF_32<<1)] = ctx2.h2; i2[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))+SIMD_COEF_32*3] = ctx2.h3; i2[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))+(SIMD_COEF_32<<2)] = ctx2.h4; SHA1_Update(&ctx1,salt_buffer,salt_len); SHA1_Update(&ctx1,"\x0\x0\x0\x1",4); SHA1_Final((unsigned char*)tmp_hash,&ctx1); SHA1_Update(&ctx2,(unsigned char*)tmp_hash,SHA_DIGEST_LENGTH); SHA1_Final((unsigned char*)tmp_hash,&ctx2); // now convert this from flat into SIMD_COEF_32 buffers. // Also, perform the 'first' ^= into the crypt buffer. NOTE, we are doing that in BE format // so we will need to 'undo' that in the end. 
o1[(k/SIMD_COEF_32)*SIMD_COEF_32*SHA_BUF_SIZ+(k&(SIMD_COEF_32-1))] = t_crypt[k*4+0] = ctx2.h0; o1[(k/SIMD_COEF_32)*SIMD_COEF_32*SHA_BUF_SIZ+(k&(SIMD_COEF_32-1))+SIMD_COEF_32] = t_crypt[k*4+1] = ctx2.h1; o1[(k/SIMD_COEF_32)*SIMD_COEF_32*SHA_BUF_SIZ+(k&(SIMD_COEF_32-1))+(SIMD_COEF_32<<1)] = t_crypt[k*4+2] = ctx2.h2; o1[(k/SIMD_COEF_32)*SIMD_COEF_32*SHA_BUF_SIZ+(k&(SIMD_COEF_32-1))+SIMD_COEF_32*3] = t_crypt[k*4+3] = ctx2.h3; o1[(k/SIMD_COEF_32)*SIMD_COEF_32*SHA_BUF_SIZ+(k&(SIMD_COEF_32-1))+(SIMD_COEF_32<<2)] = ctx2.h4; } for(i = 1; i < iteration_cnt; i++) { SIMDSHA1body((unsigned int*)t_sse_hash1, (unsigned int*)t_sse_hash1, (unsigned int*)t_sse_crypt1, SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT); SIMDSHA1body((unsigned int*)t_sse_hash1, (unsigned int*)t_sse_hash1, (unsigned int*)t_sse_crypt2, SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT); // only xor first 16 bytes, since that is ALL this format uses for (k = 0; k < MS_NUM_KEYS; k++) { unsigned *p = &((unsigned int*)t_sse_hash1)[k/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32 + (k&(SIMD_COEF_32-1))]; for(j = 0; j < 4; j++) t_crypt[k*4+j] ^= p[(j*SIMD_COEF_32)]; } } } #else /* * This function is derived from IEEE Std 802.11-2004, Clause H.4. * The main construction is from PKCS#5 v2.0. It is tweaked a little * to remove some code not needed for our SHA1-128 output. */ static void pbkdf2(unsigned int _key[]) // key is also 'final' digest. 
{ SHA_CTX ctx1, ctx2, tmp_ctx1, tmp_ctx2; unsigned char ipad[SHA_CBLOCK], opad[SHA_CBLOCK]; unsigned int tmp_hash[SHA_DIGEST_LENGTH/4]; unsigned i, j; unsigned char *key = (unsigned char*)_key; for(i = 0; i < 16; i++) { ipad[i] = key[i]^0x36; opad[i] = key[i]^0x5C; } memset(&ipad[16], 0x36, sizeof(ipad)-16); memset(&opad[16], 0x5C, sizeof(opad)-16); SHA1_Init(&ctx1); SHA1_Init(&ctx2); SHA1_Update(&ctx1, ipad, SHA_CBLOCK); SHA1_Update(&ctx2, opad, SHA_CBLOCK); memcpy(&tmp_ctx1, &ctx1, sizeof(SHA_CTX)); memcpy(&tmp_ctx2, &ctx2, sizeof(SHA_CTX)); SHA1_Update(&ctx1, salt_buffer, salt_len); SHA1_Update(&ctx1, "\x0\x0\x0\x1", 4); SHA1_Final((unsigned char*)tmp_hash,&ctx1); SHA1_Update(&ctx2, (unsigned char*)tmp_hash, SHA_DIGEST_LENGTH); // we have to sha1 final to a 'temp' buffer, since we can only overwrite first 16 bytes // of the _key buffer. If we overwrote 20 bytes, then we would lose the first 4 bytes // of the next element (and overwrite end of buffer on last element). SHA1_Final((unsigned char*)tmp_hash, &ctx2); // only copy first 16 bytes, since that is ALL this format uses memcpy(_key, tmp_hash, 16); for(i = 1; i < iteration_cnt; i++) { // we only need to copy the accumulator data from the CTX, since // the original encryption was a full block of 64 bytes. memcpy(&ctx1, &tmp_ctx1, sizeof(SHA_CTX)-(64+sizeof(unsigned int))); SHA1_Update(&ctx1, (unsigned char*)tmp_hash, SHA_DIGEST_LENGTH); SHA1_Final((unsigned char*)tmp_hash, &ctx1); memcpy(&ctx2, &tmp_ctx2, sizeof(SHA_CTX)-(64+sizeof(unsigned int))); SHA1_Update(&ctx2, (unsigned char*)tmp_hash, SHA_DIGEST_LENGTH); SHA1_Final((unsigned char*)tmp_hash, &ctx2); // only xor first 16 bytes, since that is ALL this format uses for(j = 0; j < 4; j++) _key[j] ^= tmp_hash[j]; } } #endif static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int i, t, t1; // Note, for a format like DCC2, there is little reason to optimize anything other // than the pbkdf2 inner loop. 
The one exception to that, is the NTLM can be done // and known when to be done, only when the // now get NTLM of the password (MD4 of unicode) if (new_key) { #if MS_NUM_KEYS > 1 && defined(_OPENMP) #pragma omp parallel for default(none) private(i) shared(count, key, md4hash) #endif for (i = 0; i < count; ++i) { int utf16len; UTF16 pass_unicode[PLAINTEXT_LENGTH+1]; MD4_CTX ctx; utf16len = enc_to_utf16(pass_unicode, PLAINTEXT_LENGTH, &key[(PLAINTEXT_LENGTH + 1)*i], strlen((char*)&key[(PLAINTEXT_LENGTH + 1)*i])); if (utf16len <= 0) { key[(PLAINTEXT_LENGTH + 1)*i-utf16len] = 0; if (utf16len != 0) utf16len = strlen16(pass_unicode); } MD4_Init(&ctx); MD4_Update(&ctx, pass_unicode, utf16len<<1); MD4_Final(&md4hash[HASH_LEN*i], &ctx); } new_key = 0; } #ifdef _OPENMP #pragma omp parallel for default(none) private(t) shared(count, salt_buffer, salt_len, crypt_out, md4hash) #endif for (t1 = 0; t1 < count; t1 += MS_NUM_KEYS) { MD4_CTX ctx; int i; t = t1 / MS_NUM_KEYS; for (i = 0; i < MS_NUM_KEYS; ++i) { // Get DCC1. That is MD4( NTLM . unicode(lc username) ) MD4_Init(&ctx); MD4_Update(&ctx, &md4hash[(t * MS_NUM_KEYS + i) * HASH_LEN], 16); MD4_Update(&ctx, salt_buffer, salt_len); MD4_Final((unsigned char*)&crypt_out[(t * MS_NUM_KEYS + i) * 4], &ctx); // now we have DCC1 (mscash) which is MD4 (MD4(unicode(pass)) . 
unicode(lc username)) #ifndef SIMD_COEF_32 // Non-SSE: Compute DCC2 one at a time pbkdf2(&crypt_out[(t * MS_NUM_KEYS + i) * 4]); #endif } #ifdef SIMD_COEF_32 // SSE: Compute DCC2 in parallel, once per thread pbkdf2_sse2(t); #endif } return count; } struct fmt_main fmt_mscash2 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_UNICODE | FMT_UTF8, { NULL }, { FORMAT_TAG2 }, mscash2_common_tests }, { init, done, fmt_default_reset, mscash2_common_prepare, valid, mscash2_common_split, get_binary, get_salt, { NULL }, fmt_default_source, { binary_hash_0, binary_hash_1, binary_hash_2, binary_hash_3, binary_hash_4, binary_hash_5, binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
GB_select_phase2.c
//------------------------------------------------------------------------------
// GB_select_phase2: C=select(A,thunk)
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// This is an #include'd template, not a free-standing function: the enclosing
// file defines the selector macro (GB_ENTRY_SELECTOR, GB_TRIU_SELECTOR, ...)
// and provides nthreads, ntasks, the task slices (kfirst_slice, klast_slice,
// pstart_slice, C_pstart_slice), Zp, and the output arrays Cp/Ci/Cx.
// Phase 1 has already counted the entries per vector; this phase copies the
// kept entries of A into C.

{

    //--------------------------------------------------------------------------
    // get A
    //--------------------------------------------------------------------------

    const int64_t *GB_RESTRICT Ap = A->p ;
    const int64_t *GB_RESTRICT Ah = A->h ;
    const int64_t *GB_RESTRICT Ai = A->i ;
    const GB_ATYPE *GB_RESTRICT Ax = (GB_ATYPE *) A->x ;
    size_t asize = A->type->size ;
    int64_t avlen = A->vlen ;
    int64_t avdim = A->vdim ;

    // if A is bitmap, the bitmap selector is always used instead
    ASSERT (!GB_IS_BITMAP (A)) ;

    #ifndef GB_DIAG_SELECTOR
    // if A is full, all opcodes except DIAG use the bitmap selector instead
    ASSERT (!GB_IS_FULL (A)) ;
    #endif

    //--------------------------------------------------------------------------
    // C = select (A)
    //--------------------------------------------------------------------------

    // one task per slice; schedule(dynamic,1) because tasks are uneven in size
    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (tid = 0 ; tid < ntasks ; tid++)
    {

        // if kfirst > klast then task tid does no work at all
        int64_t kfirst = kfirst_slice [tid] ;
        int64_t klast  = klast_slice  [tid] ;

        //----------------------------------------------------------------------
        // selection from vectors kfirst to klast
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // find the part of A(:,k) to be operated on by this task
            //------------------------------------------------------------------

            // pA_start:pA_end-1 is this task's share of A(:,k); pC is where
            // its output begins in C (precomputed in phase 1)
            int64_t pA_start, pA_end, pC ;
            GB_get_pA_and_pC (&pA_start, &pA_end, &pC, tid, k, kfirst, klast,
                pstart_slice, C_pstart_slice, Cp, avlen, Ap, avlen) ;

            //------------------------------------------------------------------
            // compact Ai and Ax [pA_start ... pA_end-1] into Ci and Cx
            //------------------------------------------------------------------

            #if defined ( GB_ENTRY_SELECTOR )

                // test each entry with the selector predicate, one at a time
                GB_GET_J ;
                for (int64_t pA = pA_start ; pA < pA_end ; pA++)
                {
                    // A is never full; that case is now handled by the
                    // bitmap selector instead.
                    // int64_t i = GBI (Ai, pA, avlen) ;
                    ASSERT (Ai != NULL) ;
                    int64_t i = Ai [pA] ;
                    if (GB_TEST_VALUE_OF_ENTRY (pA))
                    {
                        ASSERT (pC >= Cp [k] && pC < Cp [k+1]) ;
                        Ci [pC] = i ;
                        // Cx [pC] = Ax [pA] ;
                        GB_SELECT_ENTRY (Cx, pC, Ax, pA) ;
                        pC++ ;
                    }
                }

            #elif defined ( GB_TRIU_SELECTOR ) \
                || defined ( GB_RESIZE_SELECTOR )

                // Zp [k] is the cut point computed in phase 1:
                // keep pA_start to Zp[k]-1
                int64_t p = GB_IMIN (Zp [k], pA_end) ;
                int64_t mynz = p - pA_start ;
                if (mynz > 0)
                {
                    ASSERT (pC >= Cp [k] && pC + mynz <= Cp [k+1]) ;
                    ASSERT (Ai != NULL) ;
                    // if (Ai != NULL)
                    {
                        // A and C are both sparse or hypersparse
                        memcpy (Ci +pC, Ai +pA_start, mynz*sizeof (int64_t)) ;
                    }
                    #if 0
                    else
                    {
                        // A is full and C is sparse: for triu: the bitmap
                        // selector is used.  For resize, A is converted to
                        // hypersparse first.
                        ASSERT (GB_DEAD_CODE) ;
                        int64_t i_start = pA_start % avlen ;
                        for (int64_t s = 0 ; s < mynz ; s++)
                        {
                            int64_t i = i_start + s ;
                            ASSERT (GBI (Ai, pA_start+s, avlen) == i) ;
                            Ci [pC+s] = i ;
                        }
                    }
                    #endif
                    memcpy (Cx +pC*asize, Ax +pA_start*asize, mynz*asize) ;
                }

            #elif defined ( GB_DIAG_SELECTOR )

                // task that owns the diagonal entry does this work
                // A can be sparse or full, but not bitmap
                int64_t p = Zp [k] ;
                if (pA_start <= p && p < pA_end)
                {
                    ASSERT (pC >= Cp [k] && pC + 1 <= Cp [k+1]) ;
                    Ci [pC] = GBI (Ai, p, avlen) ;
                    memcpy (Cx +pC*asize, Ax +p*asize, asize) ;
                }

            #elif defined ( GB_OFFDIAG_SELECTOR )

                // entries before the diagonal:
                // keep pA_start to Zp[k]-1
                int64_t p = GB_IMIN (Zp [k], pA_end) ;
                int64_t mynz = p - pA_start ;
                if (mynz > 0)
                {
                    ASSERT (pC >= Cp [k] && pC + mynz <= Cp [k+1]) ;
                    ASSERT (Ai != NULL) ;
                    // if (Ai != NULL)
                    {
                        // A and C are both sparse or hypersparse
                        memcpy (Ci +pC, Ai +pA_start, mynz*sizeof (int64_t)) ;
                    }
                    #if 0
                    else
                    {
                        // A is full and C is sparse or hypersparse:
                        // this is now always handled by the bitmap selector
                        ASSERT (GB_DEAD_CODE) ;
                        int64_t i_start = pA_start % avlen ;
                        for (int64_t s = 0 ; s < mynz ; s++)
                        {
                            int64_t i = i_start + s ;
                            ASSERT (GBI (Ai, pA_start+s, avlen) == i) ;
                            Ci [pC+s] = i ;
                        }
                    }
                    #endif
                    memcpy (Cx +pC*asize, Ax +pA_start*asize, mynz*asize) ;
                    // advance pC past the copied prefix before the second part
                    pC += mynz ;
                }

                // entries after the diagonal:
                // keep Zp[k]+1 to pA_end-1
                p = GB_IMAX (Zp [k]+1, pA_start) ;
                mynz = pA_end - p ;
                if (mynz > 0)
                {
                    ASSERT (pA_start <= p && p < pA_end) ;
                    ASSERT (pC >= Cp [k] && pC + mynz <= Cp [k+1]) ;
                    ASSERT (Ai != NULL) ;
                    // if (Ai != NULL)
                    {
                        // A and C are both sparse or hypersparse
                        memcpy (Ci +pC, Ai +p, mynz*sizeof (int64_t)) ;
                    }
                    #if 0
                    else
                    {
                        // A is full and C is sparse or hypersparse
                        ASSERT (GB_DEAD_CODE) ;
                        int64_t i_start = p % avlen ;
                        for (int64_t s = 0 ; s < mynz ; s++)
                        {
                            int64_t i = i_start + s ;
                            ASSERT (GBI (Ai, p+s, avlen) == i) ;
                            Ci [pC+s] = i ;
                        }
                    }
                    #endif
                    memcpy (Cx +pC*asize, Ax +p*asize, mynz*asize) ;
                }

            #elif defined ( GB_TRIL_SELECTOR )

                // keep Zp [k] to pA_end-1
                int64_t p = GB_IMAX (Zp [k], pA_start) ;
                int64_t mynz = pA_end - p ;
                if (mynz > 0)
                {
                    ASSERT (pA_start <= p && p + mynz <= pA_end) ;
                    ASSERT (pC >= Cp [k] && pC + mynz <= Cp [k+1]) ;
                    ASSERT (Ai != NULL) ;
                    // if (Ai != NULL)
                    {
                        // A and C are both sparse or hypersparse
                        memcpy (Ci +pC, Ai +p, mynz*sizeof (int64_t)) ;
                    }
                    #if 0
                    else
                    {
                        // A is full and C is sparse or hypersparse:
                        // this is now always handled by the bitmap selector
                        ASSERT (GB_DEAD_CODE) ;
                        int64_t i_start = p % avlen ;
                        for (int64_t s = 0 ; s < mynz ; s++)
                        {
                            int64_t i = i_start + s ;
                            ASSERT (GBI (Ai, p+s, avlen) == i) ;
                            Ci [pC+s] = i ;
                        }
                    }
                    #endif
                    memcpy (Cx +pC*asize, Ax +p*asize, mynz*asize) ;
                }

            #endif
        }
    }
}
omp_barrier.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include "omp_testsuite.h" #include "omp_my_sleep.h" int test_omp_barrier() { int result1; int result2; result1 = 0; result2 = 0; #pragma omp parallel { int rank; rank = omp_get_thread_num (); if (rank ==1) { my_sleep(SLEEPTIME); result2 = 3; } #pragma omp barrier if (rank == 2) { result1 = result2; } } return (result1 == 3); } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_barrier()) { num_failed++; } } return num_failed; }
hill_climbing_engine.h
//===------------------------------------------------------------*- C++ -*-===// // // Ripples: A C++ Library for Influence Maximization // Marco Minutoli <marco.minutoli@pnnl.gov> // Pacific Northwest National Laboratory // //===----------------------------------------------------------------------===// // // Copyright (c) 2019, Battelle Memorial Institute // // Battelle Memorial Institute (hereinafter Battelle) hereby grants permission // to any person or entity lawfully obtaining a copy of this software and // associated documentation files (hereinafter “the Software”) to redistribute // and use the Software in source and binary forms, with or without // modification. Such person or entity may use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and may permit // others to do so, subject to the following conditions: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimers. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Other than as used herein, neither the name Battelle Memorial Institute or // Battelle may be used in any form whatsoever without the express written // consent of Battelle. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. 
IN NO EVENT SHALL BATTELLE OR CONTRIBUTORS BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // //===----------------------------------------------------------------------===// #ifndef RIPPLES_HILL_CLIMBING_ENGINE_H #define RIPPLES_HILL_CLIMBING_ENGINE_H #include <algorithm> #include <atomic> #include <cstdint> #include <memory> #include <vector> #include "omp.h" #include "spdlog/sinks/stdout_color_sinks.h" #include "spdlog/spdlog.h" #include "trng/uniform01_dist.hpp" #include "ripples/bitmask.h" #ifdef RIPPLES_ENABLE_CUDA #include "ripples/cuda/cuda_generate_rrr_sets.h" #include "ripples/cuda/cuda_graph.cuh" #include "ripples/cuda/cuda_hc_engine.h" #include "ripples/cuda/cuda_utils.h" #include "ripples/cuda/from_nvgraph/hc/bfs.hxx" #endif namespace ripples { //! Engine scheduling dynamically sampling tasks for the Hill Climbing. //! //! \tparam GraphTy The type of the input graph. //! \tparam ItrTy The type of the workload iterator. template <typename GraphTy, typename ItrTy> class HCWorker { public: using ex_time_ms = std::chrono::duration<double, std::milli>; //! Construct the Sampling worker. //! \param G The input Graph. HCWorker(const GraphTy &G) : G_(G) {} //! Destructor. 
virtual ~HCWorker() = default; virtual void svc_loop(std::atomic<size_t> &mpmc_head, ItrTy B, ItrTy E, std::vector<ex_time_ms> &record) = 0; protected: const GraphTy &G_; }; template <typename GraphTy, typename ItrTy, typename PRNG, typename diff_model_tag> class HCCPUSamplingWorker : public HCWorker<GraphTy, ItrTy> { using vertex_type = typename GraphTy::vertex_type; using HCWorker<GraphTy, ItrTy>::G_; public: using ex_time_ms = std::chrono::duration<double, std::milli>; HCCPUSamplingWorker(const GraphTy &G, const PRNG &rng) : HCWorker<GraphTy, ItrTy>(G), rng_(rng), UD_() {} void svc_loop(std::atomic<size_t> &mpmc_head, ItrTy B, ItrTy E, std::vector<ex_time_ms> &record) { size_t offset = 0; while ((offset = mpmc_head.fetch_add(batch_size_)) < std::distance(B, E)) { auto first = B; std::advance(first, offset); auto last = first; std::advance(last, batch_size_); if (last > E) last = E; auto start = std::chrono::high_resolution_clock::now(); batch(first, last); auto end = std::chrono::high_resolution_clock::now(); record.push_back(end - start); } } private: void batch(ItrTy B, ItrTy E) { for (; B != E; ++B) { size_t edge_number = 0; if (std::is_same<diff_model_tag, independent_cascade_tag>::value) { for (vertex_type v = 0; v < G_.num_nodes(); ++v) { for (auto &e : G_.neighbors(v)) { // (*B)[edge_number] = UD_(rng_) <= e.weight ? 1 : 0; if (UD_(rng_) <= e.weight) B->set(edge_number); ++edge_number; } } } else if (std::is_same<diff_model_tag, linear_threshold_tag>::value) { for (vertex_type v = 0; v < G_.num_nodes(); ++v) { double threshold = UD_(rng_); for (auto &e : G_.neighbors(v)) { threshold -= e.weight; if (threshold <= 0) B->set(edge_number); // (*B)[edge_number] = threshold <= 0 ? 
1 : 0; ++edge_number; } } } } } static constexpr size_t batch_size_ = 32; PRNG rng_; trng::uniform01_dist<float> UD_; }; template <typename GraphTy, typename ItrTy, typename PRNGTy, typename diff_model_tag> class HCGPUSamplingWorker : public HCWorker<GraphTy, ItrTy> { #ifdef RIPPLES_ENABLE_CUDA using HCWorker<GraphTy, ItrTy>::G_; public: using ex_time_ms = std::chrono::duration<double, std::milli>; struct config_t { static constexpr size_t block_size_ = 256; static constexpr size_t num_threads_ = 1 << 15; size_t max_blocks_{0}; config_t() : max_blocks_(num_threads_ / block_size_) {} size_t num_gpu_threads() const { return num_threads_; } }; HCGPUSamplingWorker(const GraphTy &G, PRNGTy &rng, cuda_ctx<GraphTy> *ctx) : HCWorker<GraphTy, ItrTy>(G), ctx_(ctx), conf_(), master_rng_(rng) { cuda_set_device(ctx_->gpu_id); cuda_stream_create(&cuda_stream_); cuda_malloc((void **)&d_trng_state_, conf_.num_gpu_threads() * sizeof(PRNGTy)); cuda_malloc((void **)&d_flags_, ((G.num_edges() / (8 * sizeof(int)) + 1) * sizeof(int) * batch_size_)); } ~HCGPUSamplingWorker() { cuda_set_device(ctx_->gpu_id); cuda_stream_destroy(cuda_stream_); cuda_free(d_trng_state_); cuda_free(d_flags_); } void svc_loop(std::atomic<size_t> &mpmc_head, ItrTy B, ItrTy E, std::vector<ex_time_ms> &record) { size_t offset = 0; while ((offset = mpmc_head.fetch_add(batch_size_)) < std::distance(B, E)) { auto first = B; std::advance(first, offset); auto last = first; std::advance(last, batch_size_); if (last > E) last = E; auto start = std::chrono::high_resolution_clock::now(); batch(first, last); auto end = std::chrono::high_resolution_clock::now(); record.push_back(end - start); } } void rng_setup() { cuda_set_device(ctx_->gpu_id); cuda_lt_rng_setup(d_trng_state_, master_rng_, conf_.num_gpu_threads(), 0, conf_.max_blocks_, conf_.block_size_); } private: void batch(ItrTy B, ItrTy E) { cuda_set_device(ctx_->gpu_id); if (std::is_same<diff_model_tag, independent_cascade_tag>::value) { 
cuda_generate_samples_ic(conf_.max_blocks_, conf_.block_size_, batch_size_, G_.num_edges(), d_trng_state_, ctx_, d_flags_, cuda_stream_); } else if (std::is_same<diff_model_tag, linear_threshold_tag>::value) { assert(false && "Not Yet Implemented"); } for (size_t i = 0; B < E; ++B, ++i) { cuda_d2h(B->data(), d_flags_ + i * (B->bytes() / sizeof(int)), B->bytes(), cuda_stream_); } cuda_sync(cuda_stream_); } static constexpr size_t batch_size_ = 32; cuda_ctx<GraphTy> *ctx_; config_t conf_; PRNGTy master_rng_; cudaStream_t cuda_stream_; trng::uniform01_dist<float> UD_; PRNGTy *d_trng_state_; int *d_flags_; #endif }; template <typename GraphTy, typename ItrTy, typename PRNGTy, typename diff_model_tag, typename CpuWorkerTy, typename GpuWorkerTy> class PhaseEngine { using vertex_type = typename GraphTy::vertex_type; using worker_type = HCWorker<GraphTy, ItrTy>; using cpu_worker_type = CpuWorkerTy; using gpu_worker_type = GpuWorkerTy; public: using ex_time_ms = std::chrono::duration<double, std::milli>; PhaseEngine(const GraphTy &G, PRNGTy &master_rng, size_t cpu_workers, size_t gpu_workers, std::string loggerName) : G_(G), logger_(spdlog::stdout_color_mt(loggerName)) { size_t num_threads = cpu_workers + gpu_workers; // Construct workers. 
logger_->debug("Number of Threads = {}", num_threads); workers_.resize(num_threads); cpu_workers_.resize(cpu_workers); #if RIPPLES_ENABLE_CUDA gpu_workers_.resize(gpu_workers); cuda_contexts_.resize(gpu_workers); #endif #pragma omp parallel { int rank = omp_get_thread_num(); if (rank < cpu_workers) { auto rng = master_rng; rng.split(num_threads, rank); auto w = new cpu_worker_type(G_, rng); workers_[rank] = w; cpu_workers_[rank] = w; logger_->debug("> mapping: omp {}\t->CPU", rank); } else { #if RIPPLES_ENABLE_CUDA size_t num_devices = cuda_num_devices(); size_t device_id = rank % num_devices; logger_->debug("> mapping: omp {}\t->GPU {}/{}", rank, device_id, num_devices); logger_->trace("Building Cuda Context"); cuda_contexts_[rank - cpu_workers] = cuda_make_ctx(G, device_id); auto rng = master_rng; rng.split(num_threads, rank); auto w = new gpu_worker_type(G_, rng, cuda_contexts_[rank - cpu_workers]); w->rng_setup(); workers_[rank] = w; gpu_workers_[rank - cpu_workers] = w; logger_->trace("Cuda Context Built!"); #endif } } } ~PhaseEngine() { // Free workers. 
for (auto &v : workers_) delete v; #if RIPPLES_ENABLE_CUDA for (auto ctx : cuda_contexts_) { cuda_set_device(ctx->gpu_id); cuda_destroy_ctx(ctx); delete ctx; } #endif } protected: const GraphTy &G_; std::shared_ptr<spdlog::logger> logger_; std::vector<cpu_worker_type *> cpu_workers_; #if RIPPLES_ENABLE_CUDA std::vector<gpu_worker_type *> gpu_workers_; std::vector<cuda_ctx<GraphTy> *> cuda_contexts_; #endif std::vector<worker_type *> workers_; std::atomic<size_t> mpmc_head_{0}; }; template <typename GraphTy, typename ItrTy, typename PRNGTy, typename diff_model_tag> class SamplingEngine : public PhaseEngine< GraphTy, ItrTy, PRNGTy, diff_model_tag, HCCPUSamplingWorker<GraphTy, ItrTy, PRNGTy, diff_model_tag>, HCGPUSamplingWorker<GraphTy, ItrTy, PRNGTy, diff_model_tag>> { using phase_engine = PhaseEngine<GraphTy, ItrTy, PRNGTy, diff_model_tag, HCCPUSamplingWorker<GraphTy, ItrTy, PRNGTy, diff_model_tag>, HCGPUSamplingWorker<GraphTy, ItrTy, PRNGTy, diff_model_tag>>; using ex_time_ms = std::chrono::duration<double, std::milli>; public: SamplingEngine(const GraphTy &G, PRNGTy &master_rng, size_t cpu_workers, size_t gpu_workers) : phase_engine(G, master_rng, cpu_workers, gpu_workers, "SamplingEngine") {} void exec(ItrTy B, ItrTy E, std::vector<std::vector<ex_time_ms>> &record) { record.resize(workers_.size()); mpmc_head_.store(0); logger_->trace("Start Sampling"); #pragma omp parallel { assert(workers_.size() == omp_get_num_threads()); size_t rank = omp_get_thread_num(); workers_[rank]->svc_loop(mpmc_head_, B, E, record[rank]); } logger_->trace("End Sampling"); } private: using phase_engine::logger_; using phase_engine::mpmc_head_; using phase_engine::workers_; }; namespace { template <typename GraphTy, typename GraphMaskTy, typename Itr> size_t BFS(GraphTy &G, GraphMaskTy &M, Itr b, Itr e, Bitmask<int> &visited) { using vertex_type = typename GraphTy::vertex_type; std::queue<vertex_type> queue; for (; b != e; ++b) { queue.push(*b); } while (!queue.empty()) { vertex_type u = 
queue.front(); queue.pop(); visited.set(u); size_t edge_number = std::distance(G.neighbors(0).begin(), G.neighbors(u).begin()); for (auto v : G.neighbors(u)) { if (M.get(edge_number) && !visited.get(v.vertex)) { queue.push(v.vertex); } ++edge_number; } } return visited.popcount(); } template <typename GraphTy, typename GraphMaskTy> size_t BFS(GraphTy &G, GraphMaskTy &M, typename GraphTy::vertex_type v, Bitmask<int> visited) { using vertex_type = typename GraphTy::vertex_type; std::queue<vertex_type> queue; queue.push(v); visited.set(v); while (!queue.empty()) { vertex_type u = queue.front(); queue.pop(); size_t edge_number = std::distance(G.neighbors(0).begin(), G.neighbors(u).begin()); for (auto v : G.neighbors(u)) { if (M.get(edge_number) && !visited.get(v.vertex)) { queue.push(v.vertex); visited.set(v.vertex); } ++edge_number; } } return visited.popcount(); } } // namespace template <typename GraphTy, typename ItrTy> class HCCPUCountingWorker : public HCWorker<GraphTy, ItrTy> { using vertex_type = typename GraphTy::vertex_type; using HCWorker<GraphTy, ItrTy>::G_; public: using ex_time_ms = std::chrono::duration<double, std::milli>; HCCPUCountingWorker(const GraphTy &G, std::vector<size_t> &count, const std::set<vertex_type> &S) : HCWorker<GraphTy, ItrTy>(G), count_(count), S_(S) {} void svc_loop(std::atomic<size_t> &mpmc_head, ItrTy B, ItrTy E, std::vector<ex_time_ms> &record) { size_t offset = 0; while ((offset = mpmc_head.fetch_add(batch_size_)) < std::distance(B, E)) { auto first = B; std::advance(first, offset); auto last = first; std::advance(last, batch_size_); if (last > E) last = E; auto start = std::chrono::high_resolution_clock::now(); batch(first, last); auto end = std::chrono::high_resolution_clock::now(); record.push_back(end - start); } } private: void batch(ItrTy B, ItrTy E) { for (auto itr = B; itr < E; ++itr) { Bitmask<int> visited(G_.num_nodes()); size_t base_count = BFS(G_, *itr, S_.begin(), S_.end(), visited); for (vertex_type v = 0; v < 
G_.num_nodes(); ++v) { if (S_.find(v) != S_.end()) continue; size_t update_count = base_count + 1; if (!visited.get(v)) { update_count = BFS(G_, *itr, v, visited); } #pragma omp atomic count_[v] += update_count; } } } static constexpr size_t batch_size_ = 2; std::vector<size_t> &count_; const std::set<vertex_type> &S_; }; template <typename GraphTy, typename ItrTy> class HCGPUCountingWorker : public HCWorker<GraphTy, ItrTy> { #ifdef RIPPLES_ENABLE_CUDA using vertex_type = typename GraphTy::vertex_type; using d_vertex_type = typename cuda_device_graph<GraphTy>::vertex_t; using bfs_solver_t = nvgraph::Bfs<int>; using HCWorker<GraphTy, ItrTy>::G_; public: using ex_time_ms = std::chrono::duration<double, std::milli>; struct config_t { config_t(size_t num_workers) : block_size_(bfs_solver_t::traverse_block_size()), max_blocks_(num_workers ? cuda_max_blocks() / num_workers : 0) { auto console = spdlog::get("console"); console->trace( "> [GPUWalkWorkerIC::config_t] " "max_blocks_={}\tblock_size_={}", max_blocks_, block_size_); } size_t num_gpu_threads() const { return max_blocks_ * block_size_; } const size_t max_blocks_; const size_t block_size_; }; HCGPUCountingWorker(const config_t &conf, const GraphTy &G, cuda_ctx<GraphTy> *ctx, std::vector<size_t> &count, const std::set<vertex_type> &S) : HCWorker<GraphTy, ItrTy>(G), conf_(conf), ctx_(ctx), count_(count), S_(S), edge_filter_(new d_vertex_type[G_.num_edges()]) { cuda_set_device(ctx_->gpu_id); cuda_stream_create(&cuda_stream_); // allocate host/device memory Bitmask<int> _(G_.num_edges()); cuda_malloc((void **)&d_edge_filter_, _.bytes()); // create the solver solver_ = new bfs_solver_t(this->G_.num_nodes(), this->G_.num_edges(), cuda_graph_index(ctx_), cuda_graph_edges(ctx_), cuda_graph_weights(ctx_), true, TRAVERSAL_DEFAULT_ALPHA, TRAVERSAL_DEFAULT_BETA, conf_.max_blocks_, cuda_stream_); solver_->configure(nullptr, nullptr, d_edge_filter_); visited_ = std::unique_ptr<int[]>(new int[solver_->bmap_size()]); 
cuda_sync(cuda_stream_); } ~HCGPUCountingWorker() { cuda_set_device(ctx_->gpu_id); delete solver_; cuda_stream_destroy(cuda_stream_); // free host/device memory cuda_free(d_edge_filter_); } void svc_loop(std::atomic<size_t> &mpmc_head, ItrTy B, ItrTy E, std::vector<ex_time_ms> &record) { size_t offset = 0; while ((offset = mpmc_head.fetch_add(batch_size_)) < std::distance(B, E)) { auto first = B; std::advance(first, offset); auto last = first; std::advance(last, batch_size_); if (last > E) last = E; auto start = std::chrono::high_resolution_clock::now(); batch(first, last); auto end = std::chrono::high_resolution_clock::now(); record.push_back(end - start); } } private: void batch(ItrTy B, ItrTy E) { std::vector<d_vertex_type> seeds(S_.begin(), S_.end()); for (auto itr = B; itr < E; ++itr) { cuda_h2d(d_edge_filter_, itr->data(), itr->bytes(), cuda_stream_); d_vertex_type base_count; solver_->traverse(seeds.data(), seeds.size(), visited_.get(), &base_count); // cuda_d2h(predecessors_, d_predecessors_, // G_.num_nodes() * sizeof(d_vertex_type), cuda_stream_); cuda_sync(cuda_stream_); for (vertex_type v = 0; v < G_.num_nodes(); ++v) { if (S_.find(v) != S_.end()) continue; size_t update_count = base_count + 1; int m = 1 << (v % (8 * sizeof(int))); if ((visited_[v / (8 * sizeof(int))] && m) == 0) { d_vertex_type count; solver_->traverse(v, base_count, visited_.get(), &count); cuda_sync(cuda_stream_); update_count = count; } #pragma omp atomic count_[v] += update_count; } } } static constexpr size_t batch_size_ = 2; config_t conf_; cuda_ctx<GraphTy> *ctx_; cudaStream_t cuda_stream_; bfs_solver_t *solver_; std::unique_ptr<d_vertex_type[]> edge_filter_; std::unique_ptr<int[]> visited_; d_vertex_type *d_edge_filter_; std::vector<size_t> &count_; const std::set<vertex_type> &S_; #endif }; template <typename GraphTy, typename ItrTy> class SeedSelectionEngine { using vertex_type = typename GraphTy::vertex_type; using worker_type = HCWorker<GraphTy, ItrTy>; using 
cpu_worker_type = HCCPUCountingWorker<GraphTy, ItrTy>; using gpu_worker_type = HCGPUCountingWorker<GraphTy, ItrTy>; public: using ex_time_ms = std::chrono::duration<double, std::milli>; SeedSelectionEngine(const GraphTy &G, size_t cpu_workers, size_t gpu_workers) : G_(G), count_(G_.num_nodes()), S_(), logger_(spdlog::stdout_color_mt("SeedSelectionEngine")) { size_t num_threads = cpu_workers + gpu_workers; // Construct workers. logger_->debug("Number of Threads = {}", num_threads); workers_.resize(num_threads); cpu_workers_.resize(cpu_workers); #if RIPPLES_ENABLE_CUDA gpu_workers_.resize(gpu_workers); cuda_contexts_.resize(gpu_workers); #endif #pragma omp parallel { int rank = omp_get_thread_num(); if (rank < cpu_workers) { auto w = new cpu_worker_type(G_, count_, S_); workers_[rank] = w; cpu_workers_[rank] = w; logger_->debug("> mapping: omp {}\t->CPU", rank); } else { #if RIPPLES_ENABLE_CUDA size_t num_devices = cuda_num_devices(); size_t device_id = rank % num_devices; logger_->debug("> mapping: omp {}\t->GPU {}/{}", rank, device_id, num_devices); logger_->trace("Building Cuda Context"); cuda_contexts_[rank - cpu_workers] = cuda_make_ctx(G, device_id); typename gpu_worker_type::config_t gpu_conf(gpu_workers); auto w = new gpu_worker_type(gpu_conf, G_, cuda_contexts_.back(), count_, S_); workers_[rank] = w; gpu_workers_[rank - cpu_workers] = w; logger_->trace("Cuda Context Built!"); #endif } } } ~SeedSelectionEngine() { // Free workers. 
for (auto &v : workers_) delete v; #if RIPPLES_ENABLE_CUDA for (auto ctx : cuda_contexts_) { cuda_set_device(ctx->gpu_id); cuda_destroy_ctx(ctx); delete ctx; } #endif } std::vector<vertex_type> exec(ItrTy B, ItrTy E, size_t k, std::vector<std::vector<ex_time_ms>> &record) { logger_->trace("Start Seed Selection"); record.resize(workers_.size()); std::vector<vertex_type> result; result.reserve(k); for (size_t i = 0; i < k; ++i) { #pragma omp parallel for for (size_t j = 0; j < count_.size(); ++j) count_[j] = 0; mpmc_head_.store(0); #pragma omp parallel { assert(workers_.size() == omp_get_num_threads()); size_t rank = omp_get_thread_num(); workers_[rank]->svc_loop(mpmc_head_, B, E, record[rank]); } auto itr = std::max_element(count_.begin(), count_.end()); vertex_type v = std::distance(count_.begin(), itr); S_.insert(v); result.push_back(v); logger_->trace("Seed {} : {}[{}] = {}", i, v, G_.convertID(v), *itr); } logger_->trace("End Seed Selection"); return result; } private: const GraphTy &G_; std::vector<size_t> count_; std::set<vertex_type> S_; // size_t gpu_workers_; // size_t cpu_workers_; std::shared_ptr<spdlog::logger> logger_; std::vector<cpu_worker_type *> cpu_workers_; #if RIPPLES_ENABLE_CUDA std::vector<gpu_worker_type *> gpu_workers_; std::vector<cuda_ctx<GraphTy> *> cuda_contexts_; #endif std::vector<worker_type *> workers_; std::atomic<size_t> mpmc_head_{0}; }; } // namespace ripples #endif
GB_unop__identity_int64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): changes belong in the Generator/ template this file is
// expanded from, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_int64_fc64
// op(A') function:  GB_unop_tran__identity_int64_fc64

// C type:   int64_t
// A type:   GxB_FC64_t
// cast:     int64_t cij = GB_cast_to_int64_t (creal (aij))
// unaryop:  cij = aij

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting: the identity op still typecasts complex to int64 (real part only)
#define GB_CAST(z, aij) \
    int64_t z = GB_cast_to_int64_t (creal (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GxB_FC64_t aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */     \
    int64_t z = GB_cast_to_int64_t (creal (aij)) ;        \
    Cx [pC] = z ;        \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply the operator to all anz entries of Ax, writing to Cx.  Cx and Ax may
// be aliased.  Ab (the bitmap, may be NULL) selects which entries exist.
GrB_Info GB_unop_apply__identity_int64_fc64
(
    int64_t *Cx,               // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: apply the op to every entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
            // identity with no typecast reduces to a parallel memcpy
            GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (p = 0 ; p < anz ; p++)
            {
                GxB_FC64_t aij = Ax [p] ;
                int64_t z = GB_cast_to_int64_t (creal (aij)) ;
                Cx [p] = z ;
            }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            int64_t z = GB_cast_to_int64_t (creal (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel itself lives in GB_unop_transpose.c, which uses the
// GB_CAST_OP macro defined above.
GrB_Info GB_unop_tran__identity_int64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__lnot_int16_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): changes belong in the Generator/ template this file is
// expanded from, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_int16_uint8
// op(A') function:  GB_tran__lnot_int16_uint8

// C type:   int16_t
// A type:   uint8_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: logical NOT of the (nonzero-ness of the) input
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, x) \
    int16_t z = (int16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;   \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT16 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply the operator elementwise to all anz entries of Ax, writing into Cx.
GrB_Info GB_unop__lnot_int16_uint8
(
    int16_t *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel lives in GB_unaryop_transpose.c (phase 2), which uses
// the GB_CAST_OP macro defined above.
GrB_Info GB_tran__lnot_int16_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
update_ops_dm.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include "constant.h" #include "update_ops_dm.h" #include "utility.h" #ifdef _OPENMP #include <omp.h> #endif void dm_normalize(double squared_norm, CTYPE* state, ITYPE dim) { const ITYPE loop_dim = dim; const double normalize_factor = 1. / squared_norm; ITYPE state_index_y; #ifdef _OPENMP #pragma omp parallel for #endif for (state_index_y = 0; state_index_y < loop_dim; ++state_index_y) { ITYPE state_index_x; for (state_index_x = 0; state_index_x < loop_dim; ++state_index_x) { state[state_index_y * dim + state_index_x] *= normalize_factor; } } } void dm_single_qubit_dense_matrix_gate(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) { // target mask const ITYPE target_mask = 1ULL << target_qubit_index; // loop variables const ITYPE loop_dim = dim / 2; // create extended matrix CTYPE ext_matrix[16]; for (int y = 0; y < 4; ++y) { int y1 = y / 2; int y2 = y % 2; for (int x = 0; x < 4; ++x) { int x1 = x / 2; int x2 = x % 2; ext_matrix[y * 4 + x] = matrix[y1 * 2 + x1] * conj(matrix[y2 * 2 + x2]); } } ITYPE state_index_x, state_index_y; #ifdef _OPENMP #pragma omp parallel for private(state_index_x) #endif for (state_index_y = 0; state_index_y < loop_dim; ++state_index_y) { // create vertical index ITYPE basis_0_y = insert_zero_to_basis_index(state_index_y, target_mask, target_qubit_index); // flip target bit ITYPE basis_1_y = basis_0_y ^ target_mask; for (state_index_x = 0; state_index_x < loop_dim; ++state_index_x) { // create vertical index ITYPE basis_0_x = insert_zero_to_basis_index(state_index_x, target_mask, target_qubit_index); // flip target bit ITYPE basis_1_x = basis_0_x ^ target_mask; ITYPE basis_00 = basis_0_y * dim + basis_0_x; ITYPE basis_01 = basis_0_y * dim + basis_1_x; ITYPE basis_10 = basis_1_y * dim + basis_0_x; ITYPE basis_11 = basis_1_y * dim + basis_1_x; // fetch values CTYPE cval_00 = state[basis_00]; CTYPE cval_01 = state[basis_01]; CTYPE cval_10 = state[basis_10]; 
CTYPE cval_11 = state[basis_11]; // set values state[basis_00] = ext_matrix[0] * cval_00 + ext_matrix[1] * cval_01 + ext_matrix[2] * cval_10 + ext_matrix[3] * cval_11; state[basis_01] = ext_matrix[4] * cval_00 + ext_matrix[5] * cval_01 + ext_matrix[6] * cval_10 + ext_matrix[7] * cval_11; state[basis_10] = ext_matrix[8] * cval_00 + ext_matrix[9] * cval_01 + ext_matrix[10] * cval_10 + ext_matrix[11] * cval_11; state[basis_11] = ext_matrix[12] * cval_00 + ext_matrix[13] * cval_01 + ext_matrix[14] * cval_10 + ext_matrix[15] * cval_11; } } } void dm_multi_qubit_control_single_qubit_dense_matrix_gate(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) { // insert index list const UINT insert_index_list_count = control_qubit_index_count + 1; UINT* insert_index_list = create_sorted_ui_list_value(control_qubit_index_list, control_qubit_index_count, target_qubit_index); // target mask const ITYPE target_mask = 1ULL << target_qubit_index; // control mask ITYPE control_mask = create_control_mask(control_qubit_index_list, control_value_list, control_qubit_index_count); // loop variables const ITYPE loop_dim = dim >> insert_index_list_count; CTYPE adjoint_matrix[4]; adjoint_matrix[0] = conj(matrix[0]); adjoint_matrix[1] = conj(matrix[2]); adjoint_matrix[2] = conj(matrix[1]); adjoint_matrix[3] = conj(matrix[3]); ITYPE state_index_x, state_index_y; #ifdef _OPENMP #pragma omp parallel for private(state_index_y) #endif for (state_index_x = 0; state_index_x < dim; ++state_index_x) { for (state_index_y = 0; state_index_y < loop_dim; ++state_index_y) { // create base index ITYPE basis_c_t0_y = state_index_y; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_c_t0_y = insert_zero_to_basis_index(basis_c_t0_y, 1ULL << insert_index_list[cursor], insert_index_list[cursor]); } // flip controls basis_c_t0_y ^= control_mask; // gather target ITYPE 
basis_c_t1_y = basis_c_t0_y ^ target_mask; // set index ITYPE basis_0 = basis_c_t0_y * dim + state_index_x; ITYPE basis_1 = basis_c_t1_y * dim + state_index_x; // fetch values CTYPE cval_0 = state[basis_0]; CTYPE cval_1 = state[basis_1]; // set values state[basis_0] = matrix[0] * cval_0 + matrix[1] * cval_1; state[basis_1] = matrix[2] * cval_0 + matrix[3] * cval_1; } } #ifdef _OPENMP #pragma omp parallel for private(state_index_x) #endif for (state_index_y = 0; state_index_y < dim; ++state_index_y) { for (state_index_x = 0; state_index_x < loop_dim; ++state_index_x) { // create base index ITYPE basis_c_t0_x = state_index_x; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_c_t0_x = insert_zero_to_basis_index(basis_c_t0_x, 1ULL << insert_index_list[cursor], insert_index_list[cursor]); } // flip controls basis_c_t0_x ^= control_mask; // gather target ITYPE basis_c_t1_x = basis_c_t0_x ^ target_mask; // set index ITYPE basis_0 = state_index_y * dim + basis_c_t0_x; ITYPE basis_1 = state_index_y * dim + basis_c_t1_x; // fetch values CTYPE cval_0 = state[basis_0]; CTYPE cval_1 = state[basis_1]; // set values state[basis_0] = cval_0 * adjoint_matrix[0] + cval_1 * adjoint_matrix[2]; state[basis_1] = cval_0 * adjoint_matrix[1] + cval_1 * adjoint_matrix[3]; } } free(insert_index_list); } /* // inefficient implementation void dm_multi_qubit_dense_matrix_gate(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) { // matrix dim, mask, buffer const ITYPE matrix_dim = 1ULL << target_qubit_index_count; const ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count); // create extended matrix const ITYPE ext_matrix_dim = matrix_dim*matrix_dim; CTYPE* ext_matrix = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*(ext_matrix_dim * ext_matrix_dim))); for (ITYPE y = 0; y < ext_matrix_dim; ++y) { ITYPE y1 = y / matrix_dim; ITYPE y2 = y % matrix_dim; for (ITYPE x = 0; x < 
ext_matrix_dim; ++x) { ITYPE x1 = x / matrix_dim; ITYPE x2 = x % matrix_dim; ext_matrix[y*ext_matrix_dim + x] = matrix[y1*matrix_dim + x1] * conj(matrix[y2*matrix_dim + x2]); } } // insert index const UINT* sorted_insert_index_list = create_sorted_ui_list(target_qubit_index_list, target_qubit_index_count); // loop variables const ITYPE loop_dim = dim >> target_qubit_index_count; #ifndef _OPENMP CTYPE* buffer = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*ext_matrix_dim)); ITYPE state_index_y; for (state_index_y = 0; state_index_y < loop_dim; ++state_index_y) { // create base index ITYPE basis_0_y = state_index_y; for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) { UINT insert_index = sorted_insert_index_list[cursor]; basis_0_y = insert_zero_to_basis_index(basis_0_y, 1ULL << insert_index, insert_index); } ITYPE state_index_x; for (state_index_x = 0; state_index_x < loop_dim; ++state_index_x) { // create base index ITYPE basis_0_x = state_index_x; for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) { UINT insert_index = sorted_insert_index_list[cursor]; basis_0_x = insert_zero_to_basis_index(basis_0_x, 1ULL << insert_index, insert_index); } // compute matrix-vector multiply for (ITYPE y = 0; y < ext_matrix_dim; ++y) { buffer[y] = 0; for (ITYPE x = 0; x < ext_matrix_dim; ++x) { ITYPE dm_index_x = basis_0_x ^ matrix_mask_list[x%matrix_dim]; ITYPE dm_index_y = basis_0_y ^ matrix_mask_list[x/matrix_dim]; buffer[y] += ext_matrix[y*ext_matrix_dim + x] * state[ dm_index_y * dim + dm_index_x]; } } // set result for (ITYPE y = 0; y < ext_matrix_dim; ++y) { ITYPE dm_index_x = basis_0_x ^ matrix_mask_list[y % matrix_dim]; ITYPE dm_index_y = basis_0_y ^ matrix_mask_list[y / matrix_dim]; state[dm_index_y * dim + dm_index_x] = buffer[y]; } } } free(buffer); #else const UINT thread_count = omp_get_max_threads(); CTYPE* buffer_list = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*ext_matrix_dim*thread_count)); const ITYPE block_size = loop_dim / thread_count; const 
ITYPE residual = loop_dim % thread_count; #pragma omp parallel { UINT thread_id = omp_get_thread_num(); ITYPE start_index = block_size * thread_id + (residual > thread_id ? thread_id : residual); ITYPE end_index = block_size * (thread_id + 1) + (residual > (thread_id + 1) ? (thread_id + 1) : residual); CTYPE* buffer = buffer_list + thread_id * ext_matrix_dim; ITYPE state_index_y; for (state_index_y = start_index; state_index_y < end_index; ++state_index_y) { // create base index ITYPE basis_0_y = state_index_y; for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) { UINT insert_index = sorted_insert_index_list[cursor]; basis_0_y = insert_zero_to_basis_index(basis_0_y, 1ULL << insert_index, insert_index); } ITYPE state_index_x; for (state_index_x = 0; state_index_x < loop_dim; ++state_index_x) { // create base index ITYPE basis_0_x = state_index_x; for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) { UINT insert_index = sorted_insert_index_list[cursor]; basis_0_x = insert_zero_to_basis_index(basis_0_x, 1ULL << insert_index, insert_index); } // compute matrix-vector multiply for (ITYPE y = 0; y < ext_matrix_dim; ++y) { buffer[y] = 0; for (ITYPE x = 0; x < ext_matrix_dim; ++x) { ITYPE dm_index_x = basis_0_x ^ matrix_mask_list[x%matrix_dim]; ITYPE dm_index_y = basis_0_y ^ matrix_mask_list[x / matrix_dim]; buffer[y] += ext_matrix[y*ext_matrix_dim + x] * state[dm_index_y * dim + dm_index_x]; } } // set result for (ITYPE y = 0; y < ext_matrix_dim; ++y) { ITYPE dm_index_x = basis_0_x ^ matrix_mask_list[y % matrix_dim]; ITYPE dm_index_y = basis_0_y ^ matrix_mask_list[y / matrix_dim]; state[dm_index_y * dim + dm_index_x] = buffer[y]; } } } } free(buffer_list); #endif free(ext_matrix); free((UINT*)sorted_insert_index_list); free((ITYPE*)matrix_mask_list); } */ void dm_multi_qubit_dense_matrix_gate(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) { // matrix dim, mask, buffer const 
ITYPE matrix_dim = 1ULL << target_qubit_index_count; const ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count); // create extended matrix CTYPE* adjoint_matrix = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*(matrix_dim * matrix_dim))); for (ITYPE y = 0; y < matrix_dim; ++y) { for (ITYPE x = 0; x < matrix_dim; ++x) { adjoint_matrix[y*matrix_dim + x] = conj(matrix[x*matrix_dim + y]); } } // insert index const UINT* sorted_insert_index_list = create_sorted_ui_list(target_qubit_index_list, target_qubit_index_count); // loop variables const ITYPE loop_dim = dim >> target_qubit_index_count; #ifndef _OPENMP CTYPE* buffer = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim*matrix_dim)); ITYPE state_index_y; for (state_index_y = 0; state_index_y < loop_dim; ++state_index_y) { // create base index ITYPE basis_0_y = state_index_y; for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) { UINT insert_index = sorted_insert_index_list[cursor]; basis_0_y = insert_zero_to_basis_index(basis_0_y, 1ULL << insert_index, insert_index); } ITYPE state_index_x; for (state_index_x = 0; state_index_x < loop_dim; ++state_index_x) { // create base index ITYPE basis_0_x = state_index_x; for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) { UINT insert_index = sorted_insert_index_list[cursor]; basis_0_x = insert_zero_to_basis_index(basis_0_x, 1ULL << insert_index, insert_index); } // compute matrix-matrix multiply // TODO: improve matmul for (ITYPE y = 0; y < matrix_dim; ++y) { for (ITYPE x = 0; x < matrix_dim; ++x) { buffer[y*matrix_dim + x] = 0; for (ITYPE k = 0; k < matrix_dim; ++k) { ITYPE dm_index_x = basis_0_x ^ matrix_mask_list[x]; ITYPE dm_index_k = basis_0_y ^ matrix_mask_list[k]; buffer[y*matrix_dim+x] += matrix[y*matrix_dim + k] * state[ dm_index_k * dim + dm_index_x]; } } } for (ITYPE y = 0; y < matrix_dim; ++y) { for (ITYPE x = 0; x < matrix_dim; ++x) { ITYPE dm_index_x = basis_0_x ^ matrix_mask_list[x]; ITYPE 
dm_index_y = basis_0_y ^ matrix_mask_list[y]; ITYPE dm_index = dm_index_y * dim + dm_index_x; state[dm_index] = 0; for (ITYPE k = 0; k < matrix_dim; ++k) { state[dm_index] += buffer[y*matrix_dim + k] * adjoint_matrix[k*matrix_dim + x]; } } } } } free(buffer); #else const UINT thread_count = omp_get_max_threads(); CTYPE* buffer_list = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim*matrix_dim*thread_count)); const ITYPE block_size = loop_dim / thread_count; const ITYPE residual = loop_dim % thread_count; #pragma omp parallel { UINT thread_id = omp_get_thread_num(); ITYPE start_index = block_size * thread_id + (residual > thread_id ? thread_id : residual); ITYPE end_index = block_size * (thread_id + 1) + (residual > (thread_id + 1) ? (thread_id + 1) : residual); CTYPE* buffer = buffer_list + thread_id * matrix_dim*matrix_dim; ITYPE state_index_y; for (state_index_y = start_index; state_index_y < end_index; ++state_index_y) { // create base index ITYPE basis_0_y = state_index_y; for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) { UINT insert_index = sorted_insert_index_list[cursor]; basis_0_y = insert_zero_to_basis_index(basis_0_y, 1ULL << insert_index, insert_index); } ITYPE state_index_x; for (state_index_x = 0; state_index_x < loop_dim; ++state_index_x) { // create base index ITYPE basis_0_x = state_index_x; for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) { UINT insert_index = sorted_insert_index_list[cursor]; basis_0_x = insert_zero_to_basis_index(basis_0_x, 1ULL << insert_index, insert_index); } // compute matrix-matrix multiply // TODO: improve matmul for (ITYPE y = 0; y < matrix_dim; ++y) { for (ITYPE x = 0; x < matrix_dim; ++x) { buffer[y*matrix_dim + x] = 0; for (ITYPE k = 0; k < matrix_dim; ++k) { ITYPE dm_index_x = basis_0_x ^ matrix_mask_list[x]; ITYPE dm_index_k = basis_0_y ^ matrix_mask_list[k]; buffer[y*matrix_dim + x] += matrix[y*matrix_dim + k] * state[dm_index_k * dim + dm_index_x]; } } } for (ITYPE y = 0; y < 
matrix_dim; ++y) { for (ITYPE x = 0; x < matrix_dim; ++x) { ITYPE dm_index_x = basis_0_x ^ matrix_mask_list[x]; ITYPE dm_index_y = basis_0_y ^ matrix_mask_list[y]; ITYPE dm_index = dm_index_y * dim + dm_index_x; state[dm_index] = 0; for (ITYPE k = 0; k < matrix_dim; ++k) { state[dm_index] += buffer[y*matrix_dim + k] * adjoint_matrix[k*matrix_dim + x]; } } } } } } free(buffer_list); #endif free(adjoint_matrix); free((UINT*)sorted_insert_index_list); free((ITYPE*)matrix_mask_list); } void dm_multi_qubit_control_multi_qubit_dense_matrix_gate(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) { // matrix dim, mask, buffer const ITYPE matrix_dim = 1ULL << target_qubit_index_count; ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count); // insert index const UINT insert_index_count = target_qubit_index_count + control_qubit_index_count; UINT* sorted_insert_index_list = create_sorted_ui_list_list(target_qubit_index_list, target_qubit_index_count, control_qubit_index_list, control_qubit_index_count); // control mask ITYPE control_mask = create_control_mask(control_qubit_index_list, control_value_list, control_qubit_index_count); // loop varaibles const ITYPE loop_dim = dim >> (target_qubit_index_count + control_qubit_index_count); CTYPE* adjoint_matrix = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim*matrix_dim)); for (ITYPE y = 0; y < matrix_dim; ++y) { for (ITYPE x = 0; x < matrix_dim; ++x) { adjoint_matrix[y*matrix_dim + x] = conj(matrix[x*matrix_dim + y]); } } #ifndef _OPENMP CTYPE* buffer = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim)); ITYPE state_index_x, state_index_y; for (state_index_x = 0; state_index_x < dim; ++state_index_x) { for (state_index_y = 0; state_index_y < loop_dim; ++state_index_y) { // create base index ITYPE basis_0_y = 
state_index_y; for (UINT cursor = 0; cursor < insert_index_count; cursor++) { UINT insert_index = sorted_insert_index_list[cursor]; basis_0_y = insert_zero_to_basis_index(basis_0_y, 1ULL << insert_index, insert_index); } // flip control masks basis_0_y ^= control_mask; // compute matrix vector mul for (ITYPE y = 0; y < matrix_dim; ++y) { buffer[y] = 0; for (ITYPE x = 0; x < matrix_dim; ++x) { ITYPE dm_index_y = basis_0_y ^ matrix_mask_list[x]; buffer[y] += matrix[y*matrix_dim + x] * state[dm_index_y*dim + state_index_x]; } } // set result for (ITYPE y = 0; y < matrix_dim; ++y) { ITYPE dm_index_y = basis_0_y ^ matrix_mask_list[y]; state[dm_index_y*dim + state_index_x] = buffer[y]; } } } for (state_index_y = 0; state_index_y < dim; ++state_index_y) { for (state_index_x = 0; state_index_x < loop_dim; ++state_index_x) { // create base index ITYPE basis_0_x = state_index_x; for (UINT cursor = 0; cursor < insert_index_count; cursor++) { UINT insert_index = sorted_insert_index_list[cursor]; basis_0_x = insert_zero_to_basis_index(basis_0_x, 1ULL << insert_index, insert_index); } // flip control masks basis_0_x ^= control_mask; // compute matrix vector mul for (ITYPE y = 0; y < matrix_dim; ++y) { buffer[y] = 0; for (ITYPE x = 0; x < matrix_dim; ++x) { ITYPE dm_index_x = basis_0_x ^ matrix_mask_list[x]; buffer[y] += state[state_index_y*dim + dm_index_x] * adjoint_matrix[x*matrix_dim + y]; } } // set result for (ITYPE y = 0; y < matrix_dim; ++y) { ITYPE dm_index_x = basis_0_x ^ matrix_mask_list[y]; state[state_index_y*dim + dm_index_x] = buffer[y]; } } } free(buffer); #else const UINT thread_count = omp_get_max_threads(); CTYPE* buffer_list = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim*thread_count)); const ITYPE block_size = dim / thread_count; const ITYPE residual = dim % thread_count; #pragma omp parallel { UINT thread_id = omp_get_thread_num(); ITYPE start_index = block_size * thread_id + (residual > thread_id ? 
thread_id : residual); ITYPE end_index = block_size * (thread_id + 1) + (residual > (thread_id + 1) ? (thread_id + 1) : residual); CTYPE* buffer = buffer_list + thread_id * matrix_dim; ITYPE state_index_y, state_index_x; for (state_index_x = start_index; state_index_x < end_index; ++state_index_x) { for (state_index_y = 0; state_index_y < loop_dim; ++state_index_y) { // create base index ITYPE basis_0_y = state_index_y; for (UINT cursor = 0; cursor < insert_index_count; cursor++) { UINT insert_index = sorted_insert_index_list[cursor]; basis_0_y = insert_zero_to_basis_index(basis_0_y, 1ULL << insert_index, insert_index); } // flip control masks basis_0_y ^= control_mask; // compute matrix vector mul for (ITYPE y = 0; y < matrix_dim; ++y) { buffer[y] = 0; for (ITYPE x = 0; x < matrix_dim; ++x) { ITYPE dm_index_y = basis_0_y ^ matrix_mask_list[x]; buffer[y] += matrix[y*matrix_dim + x] * state[dm_index_y*dim + state_index_x]; } } // set result for (ITYPE y = 0; y < matrix_dim; ++y) { ITYPE dm_index_y = basis_0_y ^ matrix_mask_list[y]; state[dm_index_y*dim + state_index_x] = buffer[y]; } } } #pragma omp barrier for (state_index_y = start_index; state_index_y < end_index; ++state_index_y) { for (state_index_x = 0; state_index_x < loop_dim; ++state_index_x) { // create base index ITYPE basis_0_x = state_index_x; for (UINT cursor = 0; cursor < insert_index_count; cursor++) { UINT insert_index = sorted_insert_index_list[cursor]; basis_0_x = insert_zero_to_basis_index(basis_0_x, 1ULL << insert_index, insert_index); } // flip control masks basis_0_x ^= control_mask; // compute matrix vector mul for (ITYPE y = 0; y < matrix_dim; ++y) { buffer[y] = 0; for (ITYPE x = 0; x < matrix_dim; ++x) { ITYPE dm_index_x = basis_0_x ^ matrix_mask_list[x]; buffer[y] += state[state_index_y*dim + dm_index_x] * adjoint_matrix[x*matrix_dim + y]; } } // set result for (ITYPE y = 0; y < matrix_dim; ++y) { ITYPE dm_index_x = basis_0_x ^ matrix_mask_list[y]; state[state_index_y*dim + dm_index_x] = 
buffer[y]; } } } } free(buffer_list); #endif free(adjoint_matrix); free(sorted_insert_index_list); free(matrix_mask_list); } void dm_X_gate(UINT target_qubit_index, CTYPE *state, ITYPE dim) { dm_single_qubit_dense_matrix_gate(target_qubit_index, PAULI_MATRIX[1], state, dim); } void dm_Y_gate(UINT target_qubit_index, CTYPE *state, ITYPE dim) { dm_single_qubit_dense_matrix_gate(target_qubit_index, PAULI_MATRIX[2], state, dim); } void dm_Z_gate(UINT target_qubit_index, CTYPE *state, ITYPE dim) { dm_single_qubit_dense_matrix_gate(target_qubit_index, PAULI_MATRIX[3], state, dim); } void dm_S_gate(UINT target_qubit_index, CTYPE* state, ITYPE dim) { dm_single_qubit_dense_matrix_gate(target_qubit_index, S_GATE_MATRIX, state, dim); } void dm_Sdag_gate(UINT target_qubit_index, CTYPE* state, ITYPE dim) { dm_single_qubit_dense_matrix_gate(target_qubit_index, S_DAG_GATE_MATRIX, state, dim); } void dm_T_gate(UINT target_qubit_index, CTYPE* state, ITYPE dim){ dm_single_qubit_dense_matrix_gate(target_qubit_index, T_GATE_MATRIX, state, dim); } void dm_Tdag_gate(UINT target_qubit_index, CTYPE* state, ITYPE dim) { dm_single_qubit_dense_matrix_gate(target_qubit_index, T_DAG_GATE_MATRIX, state, dim); } void dm_sqrtX_gate(UINT target_qubit_index, CTYPE* state, ITYPE dim) { dm_single_qubit_dense_matrix_gate(target_qubit_index, SQRT_X_GATE_MATRIX, state, dim); } void dm_sqrtXdag_gate(UINT target_qubit_index, CTYPE* state, ITYPE dim) { dm_single_qubit_dense_matrix_gate(target_qubit_index, SQRT_X_DAG_GATE_MATRIX, state, dim); } void dm_sqrtY_gate(UINT target_qubit_index, CTYPE* state, ITYPE dim) { dm_single_qubit_dense_matrix_gate(target_qubit_index, SQRT_Y_GATE_MATRIX, state, dim); } void dm_sqrtYdag_gate(UINT target_qubit_index, CTYPE* state, ITYPE dim) { dm_single_qubit_dense_matrix_gate(target_qubit_index, SQRT_Y_DAG_GATE_MATRIX, state, dim); } void dm_H_gate(UINT target_qubit_index, CTYPE *state, ITYPE dim) { dm_single_qubit_dense_matrix_gate(target_qubit_index, HADAMARD_MATRIX, state, 
dim); } void dm_P0_gate(UINT target_qubit_index, CTYPE *state, ITYPE dim) { dm_single_qubit_dense_matrix_gate(target_qubit_index, PROJ_0_MATRIX, state, dim); } void dm_P1_gate(UINT target_qubit_index, CTYPE *state, ITYPE dim) { dm_single_qubit_dense_matrix_gate(target_qubit_index, PROJ_1_MATRIX, state, dim); } void dm_CNOT_gate(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) { UINT control_index_list[1]; UINT control_value_list[1]; control_index_list[0] = control_qubit_index; control_value_list[0] = 1; dm_multi_qubit_control_single_qubit_dense_matrix_gate(control_index_list, control_value_list, 1, target_qubit_index, PAULI_MATRIX[1], state, dim); } void dm_CZ_gate(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) { UINT control_index_list[1]; UINT control_value_list[1]; control_index_list[0] = control_qubit_index; control_value_list[0] = 1; dm_multi_qubit_control_single_qubit_dense_matrix_gate(control_index_list, control_value_list, 1, target_qubit_index, PAULI_MATRIX[3], state, dim); } void dm_SWAP_gate(UINT target_qubit_index_0, UINT target_qubit_index_1, CTYPE *state, ITYPE dim) { CTYPE matrix[16]; memset(matrix, 0, sizeof(CTYPE) * 16); matrix[0 * 4 + 0] = 1; matrix[1 * 4 + 2] = 1; matrix[2 * 4 + 1] = 1; matrix[3 * 4 + 3] = 1; UINT target_index[2]; target_index[0] = target_qubit_index_0; target_index[1] = target_qubit_index_1; dm_multi_qubit_dense_matrix_gate(target_index, 2, matrix, state, dim); } void dm_RX_gate(UINT target_qubit_index, double angle, CTYPE* state, ITYPE dim) { UINT i, j; CTYPE rotation_gate[4]; for (i = 0; i < 2; ++i) for (j = 0; j < 2; ++j) rotation_gate[i * 2 + j] = cos(angle / 2) * PAULI_MATRIX[0][i * 2 + j] + sin(angle / 2) * 1.0i * PAULI_MATRIX[1][i * 2 + j]; dm_single_qubit_dense_matrix_gate(target_qubit_index, rotation_gate, state, dim); } void dm_RY_gate(UINT target_qubit_index, double angle, CTYPE* state, ITYPE dim) { UINT i, j; CTYPE rotation_gate[4]; for (i = 0; i < 2; ++i) for 
(j = 0; j < 2; ++j) rotation_gate[i * 2 + j] = cos(angle / 2) * PAULI_MATRIX[0][i * 2 + j] + sin(angle / 2) * 1.0i * PAULI_MATRIX[2][i * 2 + j]; dm_single_qubit_dense_matrix_gate(target_qubit_index, rotation_gate, state, dim); } void dm_RZ_gate(UINT target_qubit_index, double angle, CTYPE* state, ITYPE dim) { UINT i, j; CTYPE rotation_gate[4]; for (i = 0; i < 2; ++i) for (j = 0; j < 2; ++j) rotation_gate[i * 2 + j] = cos(angle / 2) * PAULI_MATRIX[0][i * 2 + j] + sin(angle / 2) * 1.0i * PAULI_MATRIX[3][i * 2 + j]; dm_single_qubit_dense_matrix_gate(target_qubit_index, rotation_gate, state, dim); } void dm_multi_qubit_Pauli_gate_partial_list(const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list, UINT target_qubit_index_count, CTYPE* state, ITYPE dim) { // TODO faster impl const ITYPE matrix_dim = 1ULL << target_qubit_index_count; CTYPE* matrix = (CTYPE*)malloc(sizeof(CTYPE)*matrix_dim*matrix_dim); for (ITYPE y = 0; y < matrix_dim; ++y) { for (ITYPE x = 0; x < matrix_dim; ++x) { CTYPE coef = 1.0; for (UINT i = 0; i < target_qubit_index_count; ++i) { UINT xi = (x >> i) % 2; UINT yi = (y >> i) % 2; coef *= PAULI_MATRIX[Pauli_operator_type_list[i]][yi*2+xi]; } matrix[y*matrix_dim + x] = coef; } } dm_multi_qubit_dense_matrix_gate(target_qubit_index_list, target_qubit_index_count, matrix, state, dim); free(matrix); } void dm_multi_qubit_Pauli_rotation_gate_partial_list(const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list, UINT target_qubit_index_count, double angle, CTYPE* state, ITYPE dim) { // TODO faster impl const ITYPE matrix_dim = 1ULL << target_qubit_index_count; CTYPE* matrix = (CTYPE*)malloc(sizeof(CTYPE)*matrix_dim*matrix_dim); for (ITYPE y = 0; y < matrix_dim; ++y) { for (ITYPE x = 0; x < matrix_dim; ++x) { CTYPE coef = 1.0; for (UINT i = 0; i < target_qubit_index_count; ++i) { UINT xi = (x >> i) % 2; UINT yi = (y >> i) % 2; coef *= PAULI_MATRIX[Pauli_operator_type_list[i]][yi*2+xi]; } if (y == x) { matrix[y*matrix_dim + 
x] = cos(angle / 2) *1.0 + 1.0i * sin(angle / 2)*coef; } else { matrix[y*matrix_dim + x] = 1.0i * sin(angle / 2)*coef; } } } dm_multi_qubit_dense_matrix_gate(target_qubit_index_list, target_qubit_index_count, matrix, state, dim); free(matrix); }
Sum_N_numbers_mp_CS.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <time.h>

#define n 100000

/*
 * Sum n pseudo-random numbers in parallel.  Each thread accumulates a
 * private partial sum over its share of the iterations; the partial sums
 * are combined inside a critical section, and the elapsed wall-clock time
 * is printed.
 *
 * Fixes vs. the previous version:
 *  - random_a was a single shared variable written by every thread (data
 *    race); it is now a loop-local, hence thread-private, variable.
 *  - omp_get_wtime() returns double; storing it in float lost precision.
 *  - the inner `for (j = 1; j < n; j++)` loop added a[i] to the partial sum
 *    n-1 times, turning an O(n) sum into an O(n^2) one and inflating the
 *    result; each element is now accumulated exactly once.
 */
int main() {
    /* n doubles (~800 KB) on the stack; fine for typical limits, but move
     * to the heap if the platform's stack is small */
    double a[n];
    double sum = 0, privatesum;
    double startTime, endTime, execTime; /* omp_get_wtime() yields double */
    int i;

    srand(time(0));

    startTime = omp_get_wtime();
#pragma omp parallel private(i, privatesum) shared(a, sum)
    {
        privatesum = 0;
#pragma omp for
        for (i = 0; i < n; i++) {
            /* NOTE: rand() is not required to be thread-safe; acceptable
             * for demo data, but rand_r() would be the strict choice */
            double random_a = rand(); /* thread-private: no shared write */
            a[i] = i * random_a;
            privatesum = privatesum + a[i];
        }
        /* combine the per-thread partial sums one thread at a time */
#pragma omp critical
        {
            sum = sum + privatesum;
        }
    }
    endTime = omp_get_wtime();

    execTime = endTime - startTime;
    printf("%f \n", execTime);
    return (0);
}
ordered.c
// RUN: %libomp-compile-and-run | FileCheck %s // REQUIRES: ompt // UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7 #include "callback.h" #include <omp.h> int main() { #pragma omp ordered { print_current_address(1); print_ids(0); } print_current_address(2); // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_nest_lock' // CHECK: 0: NULL_POINTER=[[NULL:.*$]] // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_wait_ordered: wait_id=[[WAIT_ID:[0-9]+]], hint={{[0-9]+}}, impl={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_acquired_ordered: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS]] // CHECK-NEXT: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_release_ordered: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]] // CHECK-NEXT: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS]] return 0; }
convolution_3x3_pack4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Transform 3x3 stride-1 convolution kernels into the Winograd F(6x6, 3x3)
// domain (8x8 = 64 taps per kernel) and interleave the result for the pack4
// NEON GEMM used at inference time.
//
// kernel          - raw weights laid out as outch * inch * 9 floats
// kernel_tm_pack4 - output: transformed weights, repacked (layout described
//                   at the "interleave" step below)
// inch / outch    - input / output channel counts
//                   NOTE(review): the packing loops below step by 4 with no
//                   tail handling, so inch and outch are presumably multiples
//                   of 4 when this is called -- confirm against the caller.
static void conv3x3s1_winograd64_transform_kernel_pack4_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch)
{
    // winograd63 transform kernel
    // kernel_tm holds one 8x8 (64-float) transformed kernel per (outch, inch) pair.
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);

    // Kernel transform matrix G (8 rows x 3 cols): each row maps the 3 taps of
    // one kernel row/column to one of the 8 transform-domain taps.
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            // 3x3 kernel for output channel p, input channel q
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed
            const float* k0 = kernel0;     // kernel row 0
            const float* k1 = kernel0 + 3; // kernel row 1
            const float* k2 = kernel0 + 6; // kernel row 2

            // h
            // First pass: tmp = G * g, expanding the 3x3 kernel to 8x3
            // (tmp[i] holds transform tap i of each of the three kernel rows).
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v
            // Second pass along the other axis: kernel_tm0 = (G * g) * G^T,
            // yielding the full 8x8 transformed kernel.
            for (int j = 0; j < 8; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 8; i++)
                {
                    kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 64-inch-outch
    // dst = 4b-4a-inch/4a-64-outch/4b;
#if __aarch64__
    // aarch64 packs 8 output channels per destination channel (hence 2 * inch/4
    // rows per tap); a trailing group of 4 output channels gets its own channel.
    kernel_tm_pack4.create(2 * inch / 4, 64, (outch / 4) / 2 + (outch / 4) % 2, (size_t)4u * 16, 16);
#else
    kernel_tm_pack4.create(inch / 4, 64, outch / 4, (size_t)4u * 16, 16);
#endif

    int q = 0;
#if __aarch64__
    // Pack 8 output channels at a time: for each of the 64 transform taps k,
    // emit 4 input channels x 8 output channels = 32 floats per inner step,
    // output-channel index varying fastest.
    for (; q + 7 < outch; q += 8)
    {
        const Mat k0 = kernel_tm.channel(q);
        const Mat k1 = kernel_tm.channel(q + 1);
        const Mat k2 = kernel_tm.channel(q + 2);
        const Mat k3 = kernel_tm.channel(q + 3);
        const Mat k4 = kernel_tm.channel(q + 4);
        const Mat k5 = kernel_tm.channel(q + 5);
        const Mat k6 = kernel_tm.channel(q + 6);
        const Mat k7 = kernel_tm.channel(q + 7);

        Mat g0 = kernel_tm_pack4.channel(q / 8);

        for (int k = 0; k < 64; k++)
        {
            float* g00 = g0.row(k);

            for (int p = 0; p + 3 < inch; p += 4)
            {
                // kXY = transformed kernel of output channel q+X, input channel p+Y
                const float* k00 = k0.row(p);
                const float* k01 = k0.row(p + 1);
                const float* k02 = k0.row(p + 2);
                const float* k03 = k0.row(p + 3);

                const float* k10 = k1.row(p);
                const float* k11 = k1.row(p + 1);
                const float* k12 = k1.row(p + 2);
                const float* k13 = k1.row(p + 3);

                const float* k20 = k2.row(p);
                const float* k21 = k2.row(p + 1);
                const float* k22 = k2.row(p + 2);
                const float* k23 = k2.row(p + 3);

                const float* k30 = k3.row(p);
                const float* k31 = k3.row(p + 1);
                const float* k32 = k3.row(p + 2);
                const float* k33 = k3.row(p + 3);

                const float* k40 = k4.row(p);
                const float* k41 = k4.row(p + 1);
                const float* k42 = k4.row(p + 2);
                const float* k43 = k4.row(p + 3);

                const float* k50 = k5.row(p);
                const float* k51 = k5.row(p + 1);
                const float* k52 = k5.row(p + 2);
                const float* k53 = k5.row(p + 3);

                const float* k60 = k6.row(p);
                const float* k61 = k6.row(p + 1);
                const float* k62 = k6.row(p + 2);
                const float* k63 = k6.row(p + 3);

                const float* k70 = k7.row(p);
                const float* k71 = k7.row(p + 1);
                const float* k72 = k7.row(p + 2);
                const float* k73 = k7.row(p + 3);

                // tap k of input channel p, output channels q..q+7
                g00[0] = k00[k];
                g00[1] = k10[k];
                g00[2] = k20[k];
                g00[3] = k30[k];
                g00[4] = k40[k];
                g00[5] = k50[k];
                g00[6] = k60[k];
                g00[7] = k70[k];

                // input channel p+1
                g00[8] = k01[k];
                g00[9] = k11[k];
                g00[10] = k21[k];
                g00[11] = k31[k];
                g00[12] = k41[k];
                g00[13] = k51[k];
                g00[14] = k61[k];
                g00[15] = k71[k];

                // input channel p+2
                g00[16] = k02[k];
                g00[17] = k12[k];
                g00[18] = k22[k];
                g00[19] = k32[k];
                g00[20] = k42[k];
                g00[21] = k52[k];
                g00[22] = k62[k];
                g00[23] = k72[k];

                // input channel p+3
                g00[24] = k03[k];
                g00[25] = k13[k];
                g00[26] = k23[k];
                g00[27] = k33[k];
                g00[28] = k43[k];
                g00[29] = k53[k];
                g00[30] = k63[k];
                g00[31] = k73[k];

                g00 += 32;
            }
        }
    }
#endif // __aarch64__
    // Remaining output channels in groups of 4: 4 input channels x 4 output
    // channels = 16 floats per inner step.
    for (; q + 3 < outch; q += 4)
    {
        const Mat k0 = kernel_tm.channel(q);
        const Mat k1 = kernel_tm.channel(q + 1);
        const Mat k2 = kernel_tm.channel(q + 2);
        const Mat k3 = kernel_tm.channel(q + 3);

#if __aarch64__
        // Skip past the 8-wide blocks already packed above.
        Mat g0 = kernel_tm_pack4.channel(q / 8 + (q % 8) / 4);
#else
        Mat g0 = kernel_tm_pack4.channel(q / 4);
#endif

        for (int k = 0; k < 64; k++)
        {
            float* g00 = g0.row(k);

            for (int p = 0; p + 3 < inch; p += 4)
            {
                const float* k00 = k0.row(p);
                const float* k01 = k0.row(p + 1);
                const float* k02 = k0.row(p + 2);
                const float* k03 = k0.row(p + 3);

                const float* k10 = k1.row(p);
                const float* k11 = k1.row(p + 1);
                const float* k12 = k1.row(p + 2);
                const float* k13 = k1.row(p + 3);

                const float* k20 = k2.row(p);
                const float* k21 = k2.row(p + 1);
                const float* k22 = k2.row(p + 2);
                const float* k23 = k2.row(p + 3);

                const float* k30 = k3.row(p);
                const float* k31 = k3.row(p + 1);
                const float* k32 = k3.row(p + 2);
                const float* k33 = k3.row(p + 3);

                // tap k: input channels p..p+3, each across output channels q..q+3
                g00[0] = k00[k];
                g00[1] = k10[k];
                g00[2] = k20[k];
                g00[3] = k30[k];

                g00[4] = k01[k];
                g00[5] = k11[k];
                g00[6] = k21[k];
                g00[7] = k31[k];

                g00[8] = k02[k];
                g00[9] = k12[k];
                g00[10] = k22[k];
                g00[11] = k32[k];

                g00[12] = k03[k];
                g00[13] = k13[k];
                g00[14] = k23[k];
                g00[15] = k33[k];

                g00 += 16;
            }
        }
    }
}

// Winograd F(6x6, 3x3) stride-1 convolution forward pass for pack4 NEON
// (definition continues below this point).
static void conv3x3s1_winograd64_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int
outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8][4]; // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { const float* r0 = img0.row(i * 6) + (j * 6) * 4; for (int m = 0; m < 8; m++) { float32x4_t _r00 = vld1q_f32(r0); 
float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r03 = vld1q_f32(r0 + 12); float32x4_t _r04 = vld1q_f32(r0 + 16); float32x4_t _r05 = vld1q_f32(r0 + 20); float32x4_t _r06 = vld1q_f32(r0 + 24); float32x4_t _r07 = vld1q_f32(r0 + 28); float32x4_t _tmp0m = vmlaq_n_f32(vsubq_f32(_r00, _r06), vsubq_f32(_r04, _r02), 5.25f); float32x4_t _tmp7m = vmlaq_n_f32(vsubq_f32(_r07, _r01), vsubq_f32(_r03, _r05), 5.25f); vst1q_f32(tmp[0][m], _tmp0m); vst1q_f32(tmp[7][m], _tmp7m); // tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; // tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_r02, _r06), _r04, 4.25f); float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_r01, _r05), _r03, 4.25f); // float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); // float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); float32x4_t _tmp1m = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _tmp2m = vsubq_f32(_tmp12a, _tmp12b); vst1q_f32(tmp[1][m], _tmp1m); vst1q_f32(tmp[2][m], _tmp2m); // tmp[1][m] = tmp12a + tmp12b; // tmp[2][m] = tmp12a - tmp12b; float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_r06, _r02, 0.25f), _r04, 1.25f); float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 0.5f), _r03, 2.5f), _r05, 2.f); // float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); // float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); float32x4_t _tmp3m = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _tmp4m = vsubq_f32(_tmp34a, _tmp34b); vst1q_f32(tmp[3][m], _tmp3m); vst1q_f32(tmp[4][m], _tmp4m); // tmp[3][m] = tmp34a + tmp34b; // tmp[4][m] = tmp34a - tmp34b; float32x4_t _tmp56a = vmlaq_n_f32(_r06, vmlsq_n_f32(_r02, _r04, 1.25f), 4.f); float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 2.f), _r03, 2.5f), _r05, 0.5f); // float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); // float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); float32x4_t _tmp5m = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _tmp6m = vsubq_f32(_tmp56a, _tmp56b); 
vst1q_f32(tmp[5][m], _tmp5m); vst1q_f32(tmp[6][m], _tmp6m); // tmp[5][m] = tmp56a + tmp56b; // tmp[6][m] = tmp56a - tmp56b; r0 += w * 4; } float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 8 + j) * 4; float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 8; float* r0_tm_3 = r0_tm_0 + tiles * 12; float* r0_tm_4 = r0_tm_0 + tiles * 16; float* r0_tm_5 = r0_tm_0 + tiles * 20; float* r0_tm_6 = r0_tm_0 + tiles * 24; float* r0_tm_7 = r0_tm_0 + tiles * 28; for (int m = 0; m < 8; m++) { float32x4_t _tmp00 = vld1q_f32(tmp[m][0]); float32x4_t _tmp01 = vld1q_f32(tmp[m][1]); float32x4_t _tmp02 = vld1q_f32(tmp[m][2]); float32x4_t _tmp03 = vld1q_f32(tmp[m][3]); float32x4_t _tmp04 = vld1q_f32(tmp[m][4]); float32x4_t _tmp05 = vld1q_f32(tmp[m][5]); float32x4_t _tmp06 = vld1q_f32(tmp[m][6]); float32x4_t _tmp07 = vld1q_f32(tmp[m][7]); float32x4_t _r0tm0 = vmlaq_n_f32(vsubq_f32(_tmp00, _tmp06), vsubq_f32(_tmp04, _tmp02), 5.25f); float32x4_t _r0tm7 = vmlaq_n_f32(vsubq_f32(_tmp07, _tmp01), vsubq_f32(_tmp03, _tmp05), 5.25f); // r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; // r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_tmp02, _tmp06), _tmp04, 4.25f); float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_tmp01, _tmp05), _tmp03, 4.25f); // float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); // float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25); float32x4_t _r0tm1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _r0tm2 = vsubq_f32(_tmp12a, _tmp12b); // r0_tm[1] = tmp12a + tmp12b; // r0_tm[2] = tmp12a - tmp12b; float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f); float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f); // float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); // float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); float32x4_t _r0tm3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _r0tm4 = vsubq_f32(_tmp34a, 
_tmp34b); // r0_tm[3] = tmp34a + tmp34b; // r0_tm[4] = tmp34a - tmp34b; float32x4_t _tmp56a = vmlaq_n_f32(_tmp06, vmlsq_n_f32(_tmp02, _tmp04, 1.25f), 4.f); float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f); // float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); // float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); float32x4_t _r0tm5 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _r0tm6 = vsubq_f32(_tmp56a, _tmp56b); // r0_tm[5] = tmp56a + tmp56b; // r0_tm[6] = tmp56a - tmp56b; vst1q_f32(r0_tm_0, _r0tm0); vst1q_f32(r0_tm_1, _r0tm1); vst1q_f32(r0_tm_2, _r0tm2); vst1q_f32(r0_tm_3, _r0tm3); vst1q_f32(r0_tm_4, _r0tm4); vst1q_f32(r0_tm_5, _r0tm5); vst1q_f32(r0_tm_6, _r0tm6); vst1q_f32(r0_tm_7, _r0tm7); r0_tm_0 += tiles * 32; r0_tm_1 += tiles * 32; r0_tm_2 += tiles * 32; r0_tm_3 += tiles * 32; r0_tm_4 += tiles * 32; r0_tm_5 += tiles * 32; r0_tm_6 += tiles * 32; r0_tm_7 += tiles * 32; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; #if __aarch64__ if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, 
opt.workspace_allocator); #else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { float* tm2p = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v8.4s, v9.4s, v10.4s, v11.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" "st1 {v4.4s}, [%1], #16 \n" "st1 {v8.4s}, [%1], #16 \n" "sub %0, %0, #128 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v5.4s}, [%1], #16 \n" "st1 {v9.4s}, [%1], #16 \n" "st1 {v2.4s}, [%1], #16 \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v10.4s}, [%1], #16 \n" "st1 {v3.4s}, [%1], #16 \n" "st1 {v7.4s}, [%1], #16 \n" "st1 {v11.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); r0 += bottom_blob_tm.cstep * 4; } } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8); #else float* tm2p = tm2.row(i / 8); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 
{v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" "sub %0, %0, #64 \n" "st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); #else asm volatile( "pld [%0, #512] \n" "vldm %0!, {d0-d7} \n" "pld [%0, #512] \n" "vldm %0, {d16-d23} \n" // transpose 8x4 "vtrn.32 q0, q1 \n" "vtrn.32 q2, q3 \n" "vtrn.32 q8, q9 \n" "vtrn.32 q10, q11 \n" "vswp d1, d4 \n" "vswp d3, d6 \n" "vswp d17, d20 \n" "vswp d19, d22 \n" "vswp q1, q8 \n" "vswp q3, q10 \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "sub %0, %0, #64 \n" "vst1.f32 {d4-d7}, [%1 :128]! \n" "vst1.f32 {d20-d23}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"); #endif r0 += bottom_blob_tm.cstep * 4; } } for (; i + 3 < tiles; i += 4) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); #else asm volatile( "pld [%0, #512] \n" "vldm %0, {d0-d7} \n" "vstm %1!, {d0-d7} \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } for (; i + 1 < tiles; i += 2) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) 
* 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.4s, v1.4s}, [%0] \n" "st1 {v0.4s, v1.4s}, [%1], #32 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1"); #else asm volatile( "pld [%0, #256] \n" "vld1.f32 {d0-d3}, [%0 :128] \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } for (; i < tiles; i++) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0"); #else asm volatile( "pld [%0, #128] \n" "vld1.f32 {d0-d1}, [%0 :128] \n" "vst1.f32 {d0-d1}, [%1 :128]! 
\n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, elemsize, elempack, opt.workspace_allocator); int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ int nn_outch = 0; nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; float* output0_tm = top_blob_tm.channel(p); float* output1_tm = top_blob_tm.channel(p + 1); const Mat kernel01_tm = kernel_tm.channel(pp); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w0011_01 "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" 
"fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "fmla v20.4s, v5.4s, v0.s[0] \n" "fmla v21.4s, v5.4s, v0.s[1] \n" "fmla v22.4s, v5.4s, v0.s[2] \n" "fmla v23.4s, v5.4s, v0.s[3] \n" "fmla v24.4s, v5.4s, v1.s[0] \n" "fmla v25.4s, v5.4s, v1.s[1] \n" "fmla v26.4s, v5.4s, v1.s[2] \n" "fmla v27.4s, v5.4s, v1.s[3] \n" "fmla v28.4s, v5.4s, v2.s[0] \n" "fmla v29.4s, v5.4s, v2.s[1] \n" "fmla v30.4s, v5.4s, v2.s[2] \n" "fmla v31.4s, v5.4s, v2.s[3] \n" "fmla v8.4s, v6.4s, v3.s[0] \n" "fmla v9.4s, v6.4s, v3.s[1] \n" "fmla v10.4s, v6.4s, v3.s[2] \n" "fmla v11.4s, v6.4s, v3.s[3] \n" "fmla v20.4s, v7.4s, v3.s[0] \n" "fmla v21.4s, v7.4s, v3.s[1] \n" "fmla v22.4s, v7.4s, v3.s[2] \n" "fmla v23.4s, v7.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "fmla v12.4s, v6.4s, v0.s[0] \n" "fmla v13.4s, v6.4s, v0.s[1] \n" "fmla v14.4s, v6.4s, v0.s[2] \n" "fmla v15.4s, v6.4s, v0.s[3] \n" "fmla v16.4s, v6.4s, v1.s[0] \n" "fmla v17.4s, v6.4s, v1.s[1] \n" "fmla v18.4s, v6.4s, v1.s[2] \n" "fmla v19.4s, v6.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v0.s[0] \n" "fmla v25.4s, v7.4s, v0.s[1] \n" "fmla v26.4s, v7.4s, v0.s[2] \n" "fmla v27.4s, v7.4s, v0.s[3] \n" "fmla v28.4s, v7.4s, v1.s[0] \n" "fmla v29.4s, v7.4s, v1.s[1] \n" "fmla v30.4s, v7.4s, v1.s[2] \n" "fmla v31.4s, v7.4s, v1.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w2233_01 "fmla v8.4s, v4.4s, v2.s[0] \n" "fmla v9.4s, v4.4s, v2.s[1] \n" "fmla v10.4s, v4.4s, v2.s[2] \n" "fmla v11.4s, v4.4s, v2.s[3] \n" "fmla v12.4s, v4.4s, v3.s[0] \n" "fmla v13.4s, v4.4s, v3.s[1] \n" "fmla v14.4s, v4.4s, v3.s[2] \n" "fmla v15.4s, v4.4s, v3.s[3] \n" "fmla v20.4s, v5.4s, v2.s[0] \n" "fmla v21.4s, v5.4s, v2.s[1] \n" "fmla v22.4s, 
v5.4s, v2.s[2] \n" "fmla v23.4s, v5.4s, v2.s[3] \n" "fmla v24.4s, v5.4s, v3.s[0] \n" "fmla v25.4s, v5.4s, v3.s[1] \n" "fmla v26.4s, v5.4s, v3.s[2] \n" "fmla v27.4s, v5.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "fmla v16.4s, v4.4s, v0.s[0] \n" "fmla v17.4s, v4.4s, v0.s[1] \n" "fmla v18.4s, v4.4s, v0.s[2] \n" "fmla v19.4s, v4.4s, v0.s[3] \n" "fmla v28.4s, v5.4s, v0.s[0] \n" "fmla v29.4s, v5.4s, v0.s[1] \n" "fmla v30.4s, v5.4s, v0.s[2] \n" "fmla v31.4s, v5.4s, v0.s[3] \n" "fmla v8.4s, v6.4s, v1.s[0] \n" "fmla v9.4s, v6.4s, v1.s[1] \n" "fmla v10.4s, v6.4s, v1.s[2] \n" "fmla v11.4s, v6.4s, v1.s[3] \n" "fmla v12.4s, v6.4s, v2.s[0] \n" "fmla v13.4s, v6.4s, v2.s[1] \n" "fmla v14.4s, v6.4s, v2.s[2] \n" "fmla v15.4s, v6.4s, v2.s[3] \n" "fmla v16.4s, v6.4s, v3.s[0] \n" "fmla v17.4s, v6.4s, v3.s[1] \n" "fmla v18.4s, v6.4s, v3.s[2] \n" "fmla v19.4s, v6.4s, v3.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v20.4s, v7.4s, v1.s[0] \n" "fmla v21.4s, v7.4s, v1.s[1] \n" "fmla v22.4s, v7.4s, v1.s[2] \n" "fmla v23.4s, v7.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v2.s[0] \n" "fmla v25.4s, v7.4s, v2.s[1] \n" "fmla v26.4s, v7.4s, v2.s[2] \n" "fmla v27.4s, v7.4s, v2.s[3] \n" "fmla v28.4s, v7.4s, v3.s[0] \n" "fmla v29.4s, v7.4s, v3.s[1] \n" "fmla v30.4s, v7.4s, v3.s[2] \n" "fmla v31.4s, v7.4s, v3.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", 
"v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // r4 r5 r6 r7 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v20.4s, v8.4s, v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v24.4s, v9.4s, v0.s[0] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v26.4s, v9.4s, v2.s[0] \n" "fmla v27.4s, v9.4s, v3.s[0] \n" "fmla v28.4s, v9.4s, v4.s[0] \n" "fmla v29.4s, v9.4s, v5.s[0] \n" "fmla v30.4s, v9.4s, v6.s[0] \n" "fmla v31.4s, v9.4s, v7.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v10.4s, v3.s[1] \n" "fmla v20.4s, v10.4s, v4.s[1] \n" "fmla v21.4s, v10.4s, v5.s[1] \n" "fmla v22.4s, v10.4s, v6.s[1] \n" "fmla v23.4s, v10.4s, v7.s[1] \n" "fmla 
v24.4s, v11.4s, v0.s[1] \n" "fmla v25.4s, v11.4s, v1.s[1] \n" "fmla v26.4s, v11.4s, v2.s[1] \n" "fmla v27.4s, v11.4s, v3.s[1] \n" "fmla v28.4s, v11.4s, v4.s[1] \n" "fmla v29.4s, v11.4s, v5.s[1] \n" "fmla v30.4s, v11.4s, v6.s[1] \n" "fmla v31.4s, v11.4s, v7.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v12.4s, v2.s[2] \n" "fmla v19.4s, v12.4s, v3.s[2] \n" "fmla v20.4s, v12.4s, v4.s[2] \n" "fmla v21.4s, v12.4s, v5.s[2] \n" "fmla v22.4s, v12.4s, v6.s[2] \n" "fmla v23.4s, v12.4s, v7.s[2] \n" "fmla v24.4s, v13.4s, v0.s[2] \n" "fmla v25.4s, v13.4s, v1.s[2] \n" "fmla v26.4s, v13.4s, v2.s[2] \n" "fmla v27.4s, v13.4s, v3.s[2] \n" "fmla v28.4s, v13.4s, v4.s[2] \n" "fmla v29.4s, v13.4s, v5.s[2] \n" "fmla v30.4s, v13.4s, v6.s[2] \n" "fmla v31.4s, v13.4s, v7.s[2] \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v14.4s, v2.s[3] \n" "fmla v19.4s, v14.4s, v3.s[3] \n" "fmla v20.4s, v14.4s, v4.s[3] \n" "fmla v21.4s, v14.4s, v5.s[3] \n" "fmla v22.4s, v14.4s, v6.s[3] \n" "fmla v23.4s, v14.4s, v7.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v24.4s, v15.4s, v0.s[3] \n" "fmla v25.4s, v15.4s, v1.s[3] \n" "fmla v26.4s, v15.4s, v2.s[3] \n" "fmla v27.4s, v15.4s, v3.s[3] \n" "fmla v28.4s, v15.4s, v4.s[3] \n" "fmla v29.4s, v15.4s, v5.s[3] \n" "fmla v30.4s, v15.4s, v6.s[3] \n" "fmla v31.4s, v15.4s, v7.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } 
for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v20.4s, v9.4s, v0.s[0] \n" "fmla v21.4s, v9.4s, v1.s[0] \n" "fmla v22.4s, v9.4s, v2.s[0] \n" "fmla v23.4s, v9.4s, v3.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v10.4s, v3.s[1] \n" "fmla v20.4s, v11.4s, v0.s[1] \n" "fmla v21.4s, v11.4s, v1.s[1] \n" "fmla v22.4s, v11.4s, v2.s[1] \n" "fmla v23.4s, v11.4s, v3.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v12.4s, v2.s[2] \n" "fmla v19.4s, v12.4s, v3.s[2] \n" "fmla v20.4s, v13.4s, v0.s[2] \n" "fmla v21.4s, v13.4s, v1.s[2] \n" "fmla v22.4s, v13.4s, v2.s[2] \n" "fmla v23.4s, v13.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v14.4s, v2.s[3] \n" "fmla v19.4s, v14.4s, v3.s[3] \n" "fmla v20.4s, v15.4s, v0.s[3] \n" "fmla v21.4s, v15.4s, v1.s[3] \n" "fmla v22.4s, v15.4s, v2.s[3] \n" "fmla v23.4s, v15.4s, v3.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" : "=r"(nn), // 
%0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } for (; i + 1 < tiles; i += 2) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4s, v1.4s}, [%3], #32 \n" // r0 r1 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v9.4s, v0.s[0] \n" "fmla v19.4s, v9.4s, v1.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v11.4s, v0.s[1] \n" "fmla v19.4s, v11.4s, v1.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v13.4s, v0.s[2] \n" "fmla v19.4s, v13.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v15.4s, v0.s[3] \n" "fmla v19.4s, v15.4s, v1.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" "st1 {v18.4s, v19.4s}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } for (; i < tiles; i++) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* k01 = 
kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "0: \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4s}, [%3], #16 \n" // r0 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v11.4s, v0.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v13.4s, v0.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v15.4s, v0.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" "st1 {v17.4s}, [%2], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17"); } } } #endif // __ARM_NEON && __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); #if __aarch64__ const Mat kernel0_tm = kernel_tm.channel(p / 2 + p % 2); #else const Mat kernel0_tm = kernel_tm.channel(p); #endif for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, 
v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // w0123_0 "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "fmla v8.4s, v5.4s, v3.s[0] \n" "fmla v9.4s, v5.4s, v3.s[1] \n" "fmla v10.4s, v5.4s, v3.s[2] \n" "fmla v11.4s, v5.4s, v3.s[3] \n" "fmla v12.4s, v5.4s, v20.s[0] \n" "fmla v13.4s, v5.4s, v20.s[1] \n" "fmla v14.4s, v5.4s, v20.s[2] \n" "fmla v15.4s, v5.4s, v20.s[3] \n" "fmla v16.4s, v5.4s, v21.s[0] \n" "fmla v17.4s, v5.4s, v21.s[1] \n" "fmla v18.4s, v5.4s, v21.s[2] \n" "fmla v19.4s, v5.4s, v21.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "fmla v8.4s, v6.4s, v22.s[0] \n" "fmla v9.4s, v6.4s, v22.s[1] \n" "fmla v10.4s, v6.4s, v22.s[2] \n" "fmla v11.4s, v6.4s, v22.s[3] \n" "fmla v12.4s, v6.4s, v23.s[0] \n" "fmla v13.4s, v6.4s, v23.s[1] \n" "fmla v14.4s, v6.4s, v23.s[2] \n" "fmla v15.4s, v6.4s, v23.s[3] \n" "fmla v16.4s, v6.4s, v24.s[0] \n" "fmla v17.4s, v6.4s, v24.s[1] \n" "fmla v18.4s, v6.4s, v24.s[2] \n" "fmla v19.4s, v6.4s, v24.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v7.4s, v25.s[0] \n" "fmla v9.4s, v7.4s, v25.s[1] \n" "fmla v10.4s, v7.4s, v25.s[2] \n" "fmla v11.4s, v7.4s, v25.s[3] \n" "fmla v12.4s, v7.4s, v26.s[0] \n" "fmla v13.4s, v7.4s, v26.s[1] \n" "fmla v14.4s, v7.4s, v26.s[2] \n" "fmla v15.4s, v7.4s, v26.s[3] \n" "fmla v16.4s, v7.4s, v27.s[0] \n" "fmla v17.4s, v7.4s, v27.s[1] \n" "fmla v18.4s, v7.4s, v27.s[2] \n" "fmla v19.4s, v7.4s, v27.s[3] 
\n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8); #else const float* r0 = bb2.row(i / 8); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n" // r4 r5 r6 r7 "fmla v20.4s, v8.4s, v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v20.4s, v9.4s, v4.s[1] \n" "fmla v21.4s, v9.4s, v5.s[1] \n" "fmla v22.4s, v9.4s, v6.s[1] \n" "fmla v23.4s, v9.4s, v7.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "fmla 
v20.4s, v10.4s, v4.s[2] \n" "fmla v21.4s, v10.4s, v5.s[2] \n" "fmla v22.4s, v10.4s, v6.s[2] \n" "fmla v23.4s, v10.4s, v7.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "fmla v20.4s, v11.4s, v4.s[3] \n" "fmla v21.4s, v11.4s, v5.s[3] \n" "fmla v22.4s, v11.4s, v6.s[3] \n" "fmla v23.4s, v11.4s, v7.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "veor q12, q12 \n" "veor q13, q13 \n" "veor q14, q14 \n" "veor q15, q15 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vmla.f32 q12, q4, d2[0] \n" "vmla.f32 q13, q4, d2[1] \n" "vmla.f32 q14, q4, d3[0] \n" "vmla.f32 q15, q4, d3[1] \n" "vmla.f32 q8, q5, d4[0] \n" "vmla.f32 q9, q5, d4[1] \n" "vmla.f32 q10, q5, d5[0] \n" "vmla.f32 q11, q5, d5[1] \n" "vmla.f32 q12, q5, d6[0] \n" "vmla.f32 q13, q5, d6[1] \n" "vmla.f32 q14, q5, d7[0] \n" "vmla.f32 q15, q5, d7[1] \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "vmla.f32 q8, q6, d0[0] \n" "vmla.f32 q9, q6, d0[1] \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q6, d1[1] \n" "vmla.f32 q12, q6, d2[0] \n" "vmla.f32 q13, q6, d2[1] \n" "vmla.f32 q14, q6, d3[0] \n" "vmla.f32 q15, q6, d3[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d4[0] \n" "vmla.f32 q9, q7, d4[1] \n" "vmla.f32 q10, q7, d5[0] \n" "vmla.f32 q11, q7, d5[1] \n" "vmla.f32 q12, q7, d6[0] \n" "vmla.f32 q13, q7, d6[1] \n" 
"vmla.f32 q14, q7, d7[0] \n" "vmla.f32 q15, q7, d7[1] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" "vstm %1!, {d24-d31} \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif } for (; i + 3 < tiles; i += 4) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19"); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #512] \n" "vldm %3!, 
{d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q10, q4, d4[0] \n" "vmla.f32 q11, q4, d6[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q10, q5, d4[1] \n" "vmla.f32 q11, q5, d6[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "vmla.f32 q10, q6, d5[0] \n" "vmla.f32 q11, q6, d7[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "vmla.f32 q10, q7, d5[1] \n" "vmla.f32 q11, q7, d7[1] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif } for (; i + 1 < tiles; i += 2) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4s, v1.4s}, [%2], #32 \n" // r0 r1 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v16", "v17"); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "0: \n" "pld [%2, #256] \n" "vld1.f32 {d0-d3}, [%2 :128]! 
\n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "bne 0b \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9"); #endif } for (; i < tiles; i++) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "0: \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" // r0 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v16"); #else asm volatile( "veor q8, q8 \n" "0: \n" "pld [%2, #128] \n" "vld1.f32 {d0-d1}, [%2 :128]! \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q8, q5, d0[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q8, q7, d1[1] \n" "bne 0b \n" "vst1.f32 {d16-d17}, [%1 :128]! 
\n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8"); #endif } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; float32x4_t _bias0 = bias ? 
vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f); float tmp[6][8][4]; // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { // top_blob_tm.create(tiles, 64, outch, elemsize, elempack); const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 8 + j) * 4; const float* output0_tm_1 = output0_tm_0 + tiles * 4; const float* output0_tm_2 = output0_tm_0 + tiles * 8; const float* output0_tm_3 = output0_tm_0 + tiles * 12; const float* output0_tm_4 = output0_tm_0 + tiles * 16; const float* output0_tm_5 = output0_tm_0 + tiles * 20; const float* output0_tm_6 = output0_tm_0 + tiles * 24; const float* output0_tm_7 = output0_tm_0 + tiles * 28; float* output0 = out0.row(i * 6) + (j * 6) * 4; // TODO neon optimize for (int m = 0; m < 8; m++) { float32x4_t _out0tm0 = vld1q_f32(output0_tm_0); float32x4_t _out0tm1 = vld1q_f32(output0_tm_1); float32x4_t _out0tm2 = vld1q_f32(output0_tm_2); float32x4_t _out0tm3 = vld1q_f32(output0_tm_3); float32x4_t _out0tm4 = vld1q_f32(output0_tm_4); float32x4_t _out0tm5 = vld1q_f32(output0_tm_5); float32x4_t _out0tm6 = vld1q_f32(output0_tm_6); float32x4_t _out0tm7 = vld1q_f32(output0_tm_7); float32x4_t _tmp024a = vaddq_f32(_out0tm1, _out0tm2); float32x4_t _tmp135a = vsubq_f32(_out0tm1, _out0tm2); // float tmp024a = output0_tm[1] + output0_tm[2]; // float tmp135a = output0_tm[1] - output0_tm[2]; float32x4_t _tmp024b = vaddq_f32(_out0tm3, _out0tm4); float32x4_t _tmp135b = vsubq_f32(_out0tm3, _out0tm4); // float tmp024b = output0_tm[3] + output0_tm[4]; // float tmp135b = output0_tm[3] - output0_tm[4]; float32x4_t _tmp024c = vaddq_f32(_out0tm5, _out0tm6); float32x4_t _tmp135c = vsubq_f32(_out0tm5, _out0tm6); // float tmp024c = output0_tm[5] + output0_tm[6]; // float tmp135c = output0_tm[5] - output0_tm[6]; float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f)); float32x4_t _tmp2m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f); float32x4_t _tmp4m = 
vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f); vst1q_f32(tmp[0][m], _tmp0m); vst1q_f32(tmp[2][m], _tmp2m); vst1q_f32(tmp[4][m], _tmp4m); // tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32; // tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; // tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; float32x4_t _tmp1m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f); float32x4_t _tmp3m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f); float32x4_t _tmp5m = vaddq_f32(vaddq_f32(_out0tm7, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f)); vst1q_f32(tmp[1][m], _tmp1m); vst1q_f32(tmp[3][m], _tmp3m); vst1q_f32(tmp[5][m], _tmp5m); // tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; // tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; // tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += tiles * 32; output0_tm_1 += tiles * 32; output0_tm_2 += tiles * 32; output0_tm_3 += tiles * 32; output0_tm_4 += tiles * 32; output0_tm_5 += tiles * 32; output0_tm_6 += tiles * 32; output0_tm_7 += tiles * 32; } for (int m = 0; m < 6; m++) { float32x4_t _tmp00 = vld1q_f32(tmp[m][0]); float32x4_t _tmp01 = vld1q_f32(tmp[m][1]); float32x4_t _tmp02 = vld1q_f32(tmp[m][2]); float32x4_t _tmp03 = vld1q_f32(tmp[m][3]); float32x4_t _tmp04 = vld1q_f32(tmp[m][4]); float32x4_t _tmp05 = vld1q_f32(tmp[m][5]); float32x4_t _tmp06 = vld1q_f32(tmp[m][6]); float32x4_t _tmp07 = vld1q_f32(tmp[m][7]); float32x4_t _tmp024a = vaddq_f32(_tmp01, _tmp02); float32x4_t _tmp135a = vsubq_f32(_tmp01, _tmp02); // float tmp024a = tmp0[1] + tmp0[2]; // float tmp135a = tmp0[1] - tmp0[2]; float32x4_t _tmp024b = vaddq_f32(_tmp03, _tmp04); float32x4_t _tmp135b = vsubq_f32(_tmp03, _tmp04); // float tmp024b = tmp0[3] + tmp0[4]; // float tmp135b = tmp0[3] - tmp0[4]; float32x4_t _tmp024c = vaddq_f32(_tmp05, _tmp06); float32x4_t _tmp135c = vsubq_f32(_tmp05, _tmp06); // float tmp024c = tmp0[5] + tmp0[6]; // float tmp135c = tmp0[5] - 
tmp0[6];

                    // Column pass of the inverse transform A^T * t: six transformed
                    // rows are collapsed into the 6 final output pixels per column.
                    // Even outputs use the (+) combinations, odd outputs the (-) ones.
                    float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f)));
                    float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f));
                    float32x4_t _out04 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f));

                    vst1q_f32(output0, _out00);
                    vst1q_f32(output0 + 8, _out02);
                    vst1q_f32(output0 + 16, _out04);

                    // output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
                    // output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
                    // output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;

                    float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f));
                    float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f));
                    float32x4_t _out05 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp07, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f)));

                    vst1q_f32(output0 + 4, _out01);
                    vst1q_f32(output0 + 12, _out03);
                    vst1q_f32(output0 + 20, _out05);

                    // output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
                    // output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
                    // output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;

                    // step to the next output row (pack4: 4 floats per pixel)
                    output0 += outw * 4;
                }
            }
        }
    }
}
// END transform output

// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}

// Transform 3x3 convolution kernels into the 6x6 Winograd F(4x4,3x3)
// domain and interleave them into the packed layout consumed by the
// pack4 dot-product kernels.
//
// kernel           : raw weights, laid out outch-inch-3x3 (9 floats each)
// kernel_tm_pack4  : destination, layout 4b-4a-inch/4a-36-outch/4b
//                    (on aarch64 output channels are additionally paired,
//                    8 per packed channel, to feed the 2-channel asm loop)
// inch, outch      : input / output channel counts (assumed multiples of 4
//                    by the interleave loops -- TODO confirm at call site)
static void conv3x3s1_winograd42_transform_kernel_pack4_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch)
{
    // winograd43 transform kernel
    Mat kernel_tm(6 * 6, inch, outch);

    // 6x3 transform matrix; the two passes below compute
    // tmp = ktm * g (per kernel row), then U = tmp * ktm^T
    const float ktm[6][3] = {
        {1.0f / 4, 0.0f, 0.0f},
        {-1.0f / 6, -1.0f / 6, -1.0f / 6},
        {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6},
        {1.0f / 24, -1.0f / 12, 1.0f / 6},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel
            const float* k0 = kernel0;          // 3x3 kernel, row 0
            const float* k1 = kernel0 + 3;      // row 1
            const float* k2 = kernel0 + 6;      // row 2

            // h : first pass, tmp[i] = ktm row i applied to each kernel row
            float tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U : second pass, expand each tmp row to the 6x6 transformed tile
            for (int j = 0; j < 6; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 36-inch-outch
    // dst = 4b-4a-inch/4a-36-outch/4b;
#if __aarch64__
    // aarch64 packs 8 output channels per destination channel (2x pack4)
    kernel_tm_pack4.create(2 * inch / 4, 36, (outch / 4) / 2 + (outch / 4) % 2, (size_t)4u * 16, 16);
#else
    kernel_tm_pack4.create(inch / 4, 36, outch / 4, (size_t)4u * 16, 16);
#endif

    int q = 0;
#if __aarch64__
    // 8 output channels at a time: for each of the 36 transform positions k,
    // emit [inch/4] groups of 32 floats ordered outch-major within an
    // input-channel quad -- exactly the order the 2-channel asm loop loads.
    for (; q + 7 < outch; q += 8)
    {
        const Mat k0 = kernel_tm.channel(q);
        const Mat k1 = kernel_tm.channel(q + 1);
        const Mat k2 = kernel_tm.channel(q + 2);
        const Mat k3 = kernel_tm.channel(q + 3);
        const Mat k4 = kernel_tm.channel(q + 4);
        const Mat k5 = kernel_tm.channel(q + 5);
        const Mat k6 = kernel_tm.channel(q + 6);
        const Mat k7 = kernel_tm.channel(q + 7);

        Mat g0 = kernel_tm_pack4.channel(q / 8);

        for (int k = 0; k < 36; k++)
        {
            float* g00 = g0.row(k);

            for (int p = 0; p + 3 < inch; p += 4)
            {
                const float* k00 = k0.row(p);
                const float* k01 = k0.row(p + 1);
                const float* k02 = k0.row(p + 2);
                const float* k03 = k0.row(p + 3);

                const float* k10 = k1.row(p);
                const float* k11 = k1.row(p + 1);
                const float* k12 = k1.row(p + 2);
                const float* k13 = k1.row(p + 3);

                const float* k20 = k2.row(p);
                const float* k21 = k2.row(p + 1);
                const float* k22 = k2.row(p + 2);
                const float* k23 = k2.row(p + 3);

                const float* k30 = k3.row(p);
                const float* k31 = k3.row(p + 1);
                const float* k32 = k3.row(p + 2);
                const float* k33 = k3.row(p + 3);

                const float* k40 = k4.row(p);
                const float* k41 = k4.row(p + 1);
                const float* k42 = k4.row(p + 2);
                const float* k43 = k4.row(p + 3);

                const float* k50 = k5.row(p);
                const float* k51 = k5.row(p + 1);
                const float* k52 = k5.row(p + 2);
                const float* k53 = k5.row(p + 3);

                const float* k60 = k6.row(p);
                const float* k61 = k6.row(p + 1);
                const float* k62 = k6.row(p + 2);
                const float* k63 = k6.row(p + 3);

                const float* k70 = k7.row(p);
                const float* k71 = k7.row(p + 1);
                const float* k72 = k7.row(p + 2);
                const float* k73 = k7.row(p + 3);

                // 8 outch values for input channel p+0, then p+1, p+2, p+3
                g00[0] = k00[k];
                g00[1] = k10[k];
                g00[2] = k20[k];
                g00[3] = k30[k];
                g00[4] = k40[k];
                g00[5] = k50[k];
                g00[6] = k60[k];
                g00[7] = k70[k];
                g00[8] = k01[k];
                g00[9] = k11[k];
                g00[10] = k21[k];
                g00[11] = k31[k];
                g00[12] = k41[k];
                g00[13] = k51[k];
                g00[14] = k61[k];
                g00[15] = k71[k];
                g00[16] = k02[k];
                g00[17] = k12[k];
                g00[18] = k22[k];
                g00[19] = k32[k];
                g00[20] = k42[k];
                g00[21] = k52[k];
                g00[22] = k62[k];
                g00[23] = k72[k];
                g00[24] = k03[k];
                g00[25] = k13[k];
                g00[26] = k23[k];
                g00[27] = k33[k];
                g00[28] = k43[k];
                g00[29] = k53[k];
                g00[30] = k63[k];
                g00[31] = k73[k];

                g00 += 32;
            }
        }
    }
#endif // __aarch64__
    // remaining output channels, 4 at a time (16 floats per inch quad)
    for (; q + 3 < outch; q += 4)
    {
        const Mat k0 = kernel_tm.channel(q);
        const Mat k1 = kernel_tm.channel(q + 1);
        const Mat k2 = kernel_tm.channel(q + 2);
        const Mat k3 = kernel_tm.channel(q + 3);

#if __aarch64__
        Mat g0 = kernel_tm_pack4.channel(q / 8 + (q % 8) / 4);
#else
        Mat g0 = kernel_tm_pack4.channel(q / 4);
#endif

        for (int k = 0; k < 36; k++)
        {
            float* g00 = g0.row(k);

            for (int p = 0; p + 3 < inch; p += 4)
            {
                const float* k00 = k0.row(p);
                const float* k01 = k0.row(p + 1);
                const float* k02 = k0.row(p + 2);
                const float* k03 = k0.row(p + 3);

                const float* k10 = k1.row(p);
                const float* k11 = k1.row(p + 1);
                const float* k12 = k1.row(p + 2);
                const float* k13 = k1.row(p + 3);

                const float* k20 = k2.row(p);
                const float* k21 = k2.row(p + 1);
                const float* k22 = k2.row(p + 2);
                const float* k23 = k2.row(p + 3);

                const float* k30 = k3.row(p);
                const float* k31 = k3.row(p + 1);
                const float* k32 = k3.row(p + 2);
                const float* k33 = k3.row(p + 3);

                // 4 outch values for input channel p+0, then p+1, p+2, p+3
                g00[0] = k00[k];
                g00[1] = k10[k];
                g00[2] = k20[k];
                g00[3] = k30[k];
                g00[4] = k01[k];
                g00[5] = k11[k];
                g00[6] = k21[k];
                g00[7] = k31[k];
                g00[8] = k02[k];
                g00[9] = k12[k];
                g00[10] = k22[k];
                g00[11] = k32[k];
                g00[12] = k03[k];
                g00[13] = k13[k];
                g00[14] = k23[k];
                g00[15] = k33[k];

                g00 += 16;
            }
        }
    }
}

// Winograd F(4x4,3x3) forward convolution, pack4 layout: 4x4 output tile
// per 6x6 input tile, stride 1. kernel_tm must come from
// conv3x3s1_winograd42_transform_kernel_pack4_neon above.
static void conv3x3s1_winograd42_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2 : output rounded up to a multiple of the 4x4 tile,
    // plus a 2-pixel apron for the 6x6 input tiles
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;
    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    const float* bias = _bias;

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = w_tm / 6 * h_tm / 6;

        bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);

        // const float itm[4][4] = {
        //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
        //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
        //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
        //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
        // };

        // 0 = 4 * r00 - 5 * r02 + r04
        // 1 = -4 * (r01 + r02) + r04 + r03
        // 2 = 4 * (r01 - r02) + r04 - r03
        // 3 = -2 * (r01 - r03) + r04 - r02
        // 4 = 2 * (r01 - r03) + r04 - r02
        // 5 = 4 * r01 - 5 * r03 + r05

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const Mat img0 = bottom_blob_bordered.channel(q);
            Mat img0_tm = bottom_blob_tm.channel(q);

            float tmp[6][6][4];

            // tile
            for (int i = 0; i < h_tm / 6; i++)
            {
                for (int j = 0; j < w_tm / 6; j++)
                {
                    const float* r0 = img0.row(i * 4)
+ (j * 4) * 4; for (int m = 0; m < 6; m++) { float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r03 = vld1q_f32(r0 + 12); float32x4_t _r04 = vld1q_f32(r0 + 16); float32x4_t _r05 = vld1q_f32(r0 + 20); float32x4_t _tmp0m = vmlsq_n_f32(vmlaq_n_f32(_r04, _r00, 4.f), _r02, 5.f); float32x4_t _tmp1m = vmlsq_n_f32(vaddq_f32(_r04, _r03), vaddq_f32(_r01, _r02), 4.f); float32x4_t _tmp2m = vmlaq_n_f32(vsubq_f32(_r04, _r03), vsubq_f32(_r01, _r02), 4.f); float32x4_t _tmp3m = vmlsq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f); float32x4_t _tmp4m = vmlaq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f); float32x4_t _tmp5m = vmlsq_n_f32(vmlaq_n_f32(_r05, _r01, 4.f), _r03, 5.f); vst1q_f32(tmp[0][m], _tmp0m); vst1q_f32(tmp[1][m], _tmp1m); vst1q_f32(tmp[2][m], _tmp2m); vst1q_f32(tmp[3][m], _tmp3m); vst1q_f32(tmp[4][m], _tmp4m); vst1q_f32(tmp[5][m], _tmp5m); r0 += w * 4; } float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 6 + j) * 4; float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 8; float* r0_tm_3 = r0_tm_0 + tiles * 12; float* r0_tm_4 = r0_tm_0 + tiles * 16; float* r0_tm_5 = r0_tm_0 + tiles * 20; for (int m = 0; m < 6; m++) { float32x4_t _tmp00 = vld1q_f32(tmp[m][0]); float32x4_t _tmp01 = vld1q_f32(tmp[m][1]); float32x4_t _tmp02 = vld1q_f32(tmp[m][2]); float32x4_t _tmp03 = vld1q_f32(tmp[m][3]); float32x4_t _tmp04 = vld1q_f32(tmp[m][4]); float32x4_t _tmp05 = vld1q_f32(tmp[m][5]); float32x4_t _r0tm0 = vmlsq_n_f32(vmlaq_n_f32(_tmp04, _tmp00, 4.f), _tmp02, 5.f); float32x4_t _r0tm1 = vmlsq_n_f32(vaddq_f32(_tmp04, _tmp03), vaddq_f32(_tmp01, _tmp02), 4.f); float32x4_t _r0tm2 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp03), vsubq_f32(_tmp01, _tmp02), 4.f); float32x4_t _r0tm3 = vmlsq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f); float32x4_t _r0tm4 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f); float32x4_t _r0tm5 = 
vmlsq_n_f32(vmlaq_n_f32(_tmp05, _tmp01, 4.f), _tmp03, 5.f); vst1q_f32(r0_tm_0, _r0tm0); vst1q_f32(r0_tm_1, _r0tm1); vst1q_f32(r0_tm_2, _r0tm2); vst1q_f32(r0_tm_3, _r0tm3); vst1q_f32(r0_tm_4, _r0tm4); vst1q_f32(r0_tm_5, _r0tm5); r0_tm_0 += tiles * 24; r0_tm_1 += tiles * 24; r0_tm_2 += tiles * 24; r0_tm_3 += tiles * 24; r0_tm_4 += tiles * 24; r0_tm_5 += tiles * 24; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = h_tm / 6 * w_tm / 6; // permute // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; #if __aarch64__ if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 36, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, elemsize, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, elemsize, elempack, opt.workspace_allocator); #else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, elemsize, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, elemsize, elempack, opt.workspace_allocator); #endif #pragma omp 
parallel for num_threads(opt.num_threads) for (int r = 0; r < 36; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { float* tm2p = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v8.4s, v9.4s, v10.4s, v11.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" "st1 {v4.4s}, [%1], #16 \n" "st1 {v8.4s}, [%1], #16 \n" "sub %0, %0, #128 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v5.4s}, [%1], #16 \n" "st1 {v9.4s}, [%1], #16 \n" "st1 {v2.4s}, [%1], #16 \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v10.4s}, [%1], #16 \n" "st1 {v3.4s}, [%1], #16 \n" "st1 {v7.4s}, [%1], #16 \n" "st1 {v11.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); r0 += bottom_blob_tm.cstep * 4; } } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8); #else float* tm2p = tm2.row(i / 8); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" "sub %0, %0, #64 \n" "st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); #else asm volatile( "pld [%0, #512] \n" "vldm %0!, {d0-d7} \n" "pld [%0, #512] \n" "vldm %0, {d16-d23} \n" // transpose 8x4 "vtrn.32 q0, q1 \n" "vtrn.32 q2, q3 \n" "vtrn.32 q8, q9 \n" "vtrn.32 q10, q11 \n" "vswp d1, d4 \n" "vswp d3, d6 \n" 
"vswp d17, d20 \n" "vswp d19, d22 \n" "vswp q1, q8 \n" "vswp q3, q10 \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "sub %0, %0, #64 \n" "vst1.f32 {d4-d7}, [%1 :128]! \n" "vst1.f32 {d20-d23}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"); #endif r0 += bottom_blob_tm.cstep * 4; } } for (; i + 3 < tiles; i += 4) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); #else asm volatile( "pld [%0, #512] \n" "vldm %0, {d0-d7} \n" "vstm %1!, {d0-d7} \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } for (; i + 1 < tiles; i += 2) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.4s, v1.4s}, [%0] \n" "st1 {v0.4s, v1.4s}, [%1], #32 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1"); #else asm volatile( "pld [%0, #256] \n" "vld1.f32 {d0-d3}, [%0 :128] \n" "vst1.f32 {d0-d3}, [%1 :128]! 
\n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } for (; i < tiles; i++) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0"); #else asm volatile( "pld [%0, #128] \n" "vld1.f32 {d0-d1}, [%0 :128] \n" "vst1.f32 {d0-d1}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 36, outch, elemsize, elempack, opt.workspace_allocator); int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ int nn_outch = 0; nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; float* output0_tm = top_blob_tm.channel(p); float* output1_tm = top_blob_tm.channel(p + 1); const Mat kernel01_tm = kernel_tm.channel(pp); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, 
v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w0011_01 "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "fmla v20.4s, v5.4s, v0.s[0] \n" "fmla v21.4s, v5.4s, v0.s[1] \n" "fmla v22.4s, v5.4s, v0.s[2] \n" "fmla v23.4s, v5.4s, v0.s[3] \n" "fmla v24.4s, v5.4s, v1.s[0] \n" "fmla v25.4s, v5.4s, v1.s[1] \n" "fmla v26.4s, v5.4s, v1.s[2] \n" "fmla v27.4s, v5.4s, v1.s[3] \n" "fmla v28.4s, v5.4s, v2.s[0] \n" "fmla v29.4s, v5.4s, v2.s[1] \n" "fmla v30.4s, v5.4s, v2.s[2] \n" "fmla v31.4s, v5.4s, v2.s[3] \n" "fmla v8.4s, v6.4s, v3.s[0] \n" "fmla v9.4s, v6.4s, v3.s[1] \n" "fmla v10.4s, v6.4s, v3.s[2] \n" "fmla v11.4s, v6.4s, v3.s[3] \n" "fmla v20.4s, v7.4s, v3.s[0] \n" "fmla v21.4s, v7.4s, v3.s[1] \n" "fmla v22.4s, v7.4s, v3.s[2] \n" "fmla v23.4s, v7.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "fmla v12.4s, v6.4s, v0.s[0] \n" "fmla v13.4s, v6.4s, v0.s[1] \n" "fmla v14.4s, v6.4s, v0.s[2] \n" "fmla v15.4s, v6.4s, v0.s[3] \n" "fmla v16.4s, v6.4s, v1.s[0] \n" "fmla v17.4s, v6.4s, v1.s[1] \n" "fmla v18.4s, 
v6.4s, v1.s[2] \n" "fmla v19.4s, v6.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v0.s[0] \n" "fmla v25.4s, v7.4s, v0.s[1] \n" "fmla v26.4s, v7.4s, v0.s[2] \n" "fmla v27.4s, v7.4s, v0.s[3] \n" "fmla v28.4s, v7.4s, v1.s[0] \n" "fmla v29.4s, v7.4s, v1.s[1] \n" "fmla v30.4s, v7.4s, v1.s[2] \n" "fmla v31.4s, v7.4s, v1.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w2233_01 "fmla v8.4s, v4.4s, v2.s[0] \n" "fmla v9.4s, v4.4s, v2.s[1] \n" "fmla v10.4s, v4.4s, v2.s[2] \n" "fmla v11.4s, v4.4s, v2.s[3] \n" "fmla v12.4s, v4.4s, v3.s[0] \n" "fmla v13.4s, v4.4s, v3.s[1] \n" "fmla v14.4s, v4.4s, v3.s[2] \n" "fmla v15.4s, v4.4s, v3.s[3] \n" "fmla v20.4s, v5.4s, v2.s[0] \n" "fmla v21.4s, v5.4s, v2.s[1] \n" "fmla v22.4s, v5.4s, v2.s[2] \n" "fmla v23.4s, v5.4s, v2.s[3] \n" "fmla v24.4s, v5.4s, v3.s[0] \n" "fmla v25.4s, v5.4s, v3.s[1] \n" "fmla v26.4s, v5.4s, v3.s[2] \n" "fmla v27.4s, v5.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "fmla v16.4s, v4.4s, v0.s[0] \n" "fmla v17.4s, v4.4s, v0.s[1] \n" "fmla v18.4s, v4.4s, v0.s[2] \n" "fmla v19.4s, v4.4s, v0.s[3] \n" "fmla v28.4s, v5.4s, v0.s[0] \n" "fmla v29.4s, v5.4s, v0.s[1] \n" "fmla v30.4s, v5.4s, v0.s[2] \n" "fmla v31.4s, v5.4s, v0.s[3] \n" "fmla v8.4s, v6.4s, v1.s[0] \n" "fmla v9.4s, v6.4s, v1.s[1] \n" "fmla v10.4s, v6.4s, v1.s[2] \n" "fmla v11.4s, v6.4s, v1.s[3] \n" "fmla v12.4s, v6.4s, v2.s[0] \n" "fmla v13.4s, v6.4s, v2.s[1] \n" "fmla v14.4s, v6.4s, v2.s[2] \n" "fmla v15.4s, v6.4s, v2.s[3] \n" "fmla v16.4s, v6.4s, v3.s[0] \n" "fmla v17.4s, v6.4s, v3.s[1] \n" "fmla v18.4s, v6.4s, v3.s[2] \n" "fmla v19.4s, v6.4s, v3.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v20.4s, v7.4s, v1.s[0] \n" "fmla v21.4s, v7.4s, v1.s[1] \n" "fmla v22.4s, v7.4s, v1.s[2] \n" "fmla v23.4s, v7.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v2.s[0] \n" "fmla v25.4s, v7.4s, v2.s[1] \n" "fmla v26.4s, v7.4s, v2.s[2] \n" "fmla v27.4s, v7.4s, v2.s[3] \n" "fmla v28.4s, v7.4s, v3.s[0] \n" "fmla 
v29.4s, v7.4s, v3.s[1] \n" "fmla v30.4s, v7.4s, v3.s[2] \n" "fmla v31.4s, v7.4s, v3.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // r4 r5 r6 r7 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v20.4s, v8.4s, 
v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v24.4s, v9.4s, v0.s[0] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v26.4s, v9.4s, v2.s[0] \n" "fmla v27.4s, v9.4s, v3.s[0] \n" "fmla v28.4s, v9.4s, v4.s[0] \n" "fmla v29.4s, v9.4s, v5.s[0] \n" "fmla v30.4s, v9.4s, v6.s[0] \n" "fmla v31.4s, v9.4s, v7.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v10.4s, v3.s[1] \n" "fmla v20.4s, v10.4s, v4.s[1] \n" "fmla v21.4s, v10.4s, v5.s[1] \n" "fmla v22.4s, v10.4s, v6.s[1] \n" "fmla v23.4s, v10.4s, v7.s[1] \n" "fmla v24.4s, v11.4s, v0.s[1] \n" "fmla v25.4s, v11.4s, v1.s[1] \n" "fmla v26.4s, v11.4s, v2.s[1] \n" "fmla v27.4s, v11.4s, v3.s[1] \n" "fmla v28.4s, v11.4s, v4.s[1] \n" "fmla v29.4s, v11.4s, v5.s[1] \n" "fmla v30.4s, v11.4s, v6.s[1] \n" "fmla v31.4s, v11.4s, v7.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v12.4s, v2.s[2] \n" "fmla v19.4s, v12.4s, v3.s[2] \n" "fmla v20.4s, v12.4s, v4.s[2] \n" "fmla v21.4s, v12.4s, v5.s[2] \n" "fmla v22.4s, v12.4s, v6.s[2] \n" "fmla v23.4s, v12.4s, v7.s[2] \n" "fmla v24.4s, v13.4s, v0.s[2] \n" "fmla v25.4s, v13.4s, v1.s[2] \n" "fmla v26.4s, v13.4s, v2.s[2] \n" "fmla v27.4s, v13.4s, v3.s[2] \n" "fmla v28.4s, v13.4s, v4.s[2] \n" "fmla v29.4s, v13.4s, v5.s[2] \n" "fmla v30.4s, v13.4s, v6.s[2] \n" "fmla v31.4s, v13.4s, v7.s[2] \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v14.4s, v2.s[3] \n" "fmla v19.4s, v14.4s, v3.s[3] \n" "fmla v20.4s, v14.4s, v4.s[3] \n" "fmla v21.4s, v14.4s, v5.s[3] \n" "fmla v22.4s, v14.4s, v6.s[3] \n" "fmla v23.4s, v14.4s, v7.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v24.4s, v15.4s, v0.s[3] \n" "fmla v25.4s, v15.4s, v1.s[3] \n" "fmla v26.4s, v15.4s, v2.s[3] \n" "fmla v27.4s, v15.4s, v3.s[3] \n" "fmla 
v28.4s, v15.4s, v4.s[3] \n" "fmla v29.4s, v15.4s, v5.s[3] \n" "fmla v30.4s, v15.4s, v6.s[3] \n" "fmla v31.4s, v15.4s, v7.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v20.4s, v9.4s, v0.s[0] \n" "fmla v21.4s, v9.4s, v1.s[0] \n" "fmla v22.4s, v9.4s, v2.s[0] \n" "fmla v23.4s, v9.4s, v3.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v10.4s, v3.s[1] \n" "fmla v20.4s, v11.4s, v0.s[1] \n" "fmla v21.4s, v11.4s, v1.s[1] \n" "fmla v22.4s, 
v11.4s, v2.s[1] \n" "fmla v23.4s, v11.4s, v3.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v12.4s, v2.s[2] \n" "fmla v19.4s, v12.4s, v3.s[2] \n" "fmla v20.4s, v13.4s, v0.s[2] \n" "fmla v21.4s, v13.4s, v1.s[2] \n" "fmla v22.4s, v13.4s, v2.s[2] \n" "fmla v23.4s, v13.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v14.4s, v2.s[3] \n" "fmla v19.4s, v14.4s, v3.s[3] \n" "fmla v20.4s, v15.4s, v0.s[3] \n" "fmla v21.4s, v15.4s, v1.s[3] \n" "fmla v22.4s, v15.4s, v2.s[3] \n" "fmla v23.4s, v15.4s, v3.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } for (; i + 1 < tiles; i += 2) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4s, v1.4s}, [%3], #32 \n" // r0 r1 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v9.4s, v0.s[0] \n" "fmla v19.4s, v9.4s, v1.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v11.4s, v0.s[1] \n" "fmla v19.4s, v11.4s, v1.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, 
v12.4s, v1.s[2] \n" "fmla v18.4s, v13.4s, v0.s[2] \n" "fmla v19.4s, v13.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v15.4s, v0.s[3] \n" "fmla v19.4s, v15.4s, v1.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" "st1 {v18.4s, v19.4s}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } for (; i < tiles; i++) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "0: \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4s}, [%3], #16 \n" // r0 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v11.4s, v0.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v13.4s, v0.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v15.4s, v0.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" "st1 {v17.4s}, [%2], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17"); } } } #endif // __ARM_NEON && __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); #if __aarch64__ const Mat 
kernel0_tm = kernel_tm.channel(p / 2 + p % 2); #else const Mat kernel0_tm = kernel_tm.channel(p); #endif for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // w0123_0 "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "fmla v8.4s, v5.4s, v3.s[0] \n" "fmla v9.4s, v5.4s, v3.s[1] \n" "fmla v10.4s, v5.4s, v3.s[2] \n" "fmla v11.4s, v5.4s, v3.s[3] \n" "fmla v12.4s, v5.4s, v20.s[0] \n" "fmla v13.4s, v5.4s, v20.s[1] \n" "fmla v14.4s, v5.4s, v20.s[2] \n" "fmla v15.4s, v5.4s, v20.s[3] \n" "fmla v16.4s, v5.4s, v21.s[0] \n" "fmla v17.4s, v5.4s, v21.s[1] \n" "fmla v18.4s, v5.4s, v21.s[2] \n" "fmla v19.4s, v5.4s, v21.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "fmla v8.4s, v6.4s, v22.s[0] \n" "fmla v9.4s, v6.4s, v22.s[1] \n" "fmla 
v10.4s, v6.4s, v22.s[2] \n" "fmla v11.4s, v6.4s, v22.s[3] \n" "fmla v12.4s, v6.4s, v23.s[0] \n" "fmla v13.4s, v6.4s, v23.s[1] \n" "fmla v14.4s, v6.4s, v23.s[2] \n" "fmla v15.4s, v6.4s, v23.s[3] \n" "fmla v16.4s, v6.4s, v24.s[0] \n" "fmla v17.4s, v6.4s, v24.s[1] \n" "fmla v18.4s, v6.4s, v24.s[2] \n" "fmla v19.4s, v6.4s, v24.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v7.4s, v25.s[0] \n" "fmla v9.4s, v7.4s, v25.s[1] \n" "fmla v10.4s, v7.4s, v25.s[2] \n" "fmla v11.4s, v7.4s, v25.s[3] \n" "fmla v12.4s, v7.4s, v26.s[0] \n" "fmla v13.4s, v7.4s, v26.s[1] \n" "fmla v14.4s, v7.4s, v26.s[2] \n" "fmla v15.4s, v7.4s, v26.s[3] \n" "fmla v16.4s, v7.4s, v27.s[0] \n" "fmla v17.4s, v7.4s, v27.s[1] \n" "fmla v18.4s, v7.4s, v27.s[2] \n" "fmla v19.4s, v7.4s, v27.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8); #else const float* r0 = bb2.row(i / 8); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, 
v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n" // r4 r5 r6 r7 "fmla v20.4s, v8.4s, v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v20.4s, v9.4s, v4.s[1] \n" "fmla v21.4s, v9.4s, v5.s[1] \n" "fmla v22.4s, v9.4s, v6.s[1] \n" "fmla v23.4s, v9.4s, v7.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "fmla v20.4s, v10.4s, v4.s[2] \n" "fmla v21.4s, v10.4s, v5.s[2] \n" "fmla v22.4s, v10.4s, v6.s[2] \n" "fmla v23.4s, v10.4s, v7.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "fmla v20.4s, v11.4s, v4.s[3] \n" "fmla v21.4s, v11.4s, v5.s[3] \n" "fmla v22.4s, v11.4s, v6.s[3] \n" "fmla v23.4s, v11.4s, v7.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "veor q12, q12 \n" "veor q13, q13 \n" "veor q14, q14 \n" "veor q15, q15 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vmla.f32 q12, q4, d2[0] \n" "vmla.f32 q13, q4, d2[1] \n" 
"vmla.f32 q14, q4, d3[0] \n" "vmla.f32 q15, q4, d3[1] \n" "vmla.f32 q8, q5, d4[0] \n" "vmla.f32 q9, q5, d4[1] \n" "vmla.f32 q10, q5, d5[0] \n" "vmla.f32 q11, q5, d5[1] \n" "vmla.f32 q12, q5, d6[0] \n" "vmla.f32 q13, q5, d6[1] \n" "vmla.f32 q14, q5, d7[0] \n" "vmla.f32 q15, q5, d7[1] \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "vmla.f32 q8, q6, d0[0] \n" "vmla.f32 q9, q6, d0[1] \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q6, d1[1] \n" "vmla.f32 q12, q6, d2[0] \n" "vmla.f32 q13, q6, d2[1] \n" "vmla.f32 q14, q6, d3[0] \n" "vmla.f32 q15, q6, d3[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d4[0] \n" "vmla.f32 q9, q7, d4[1] \n" "vmla.f32 q10, q7, d5[0] \n" "vmla.f32 q11, q7, d5[1] \n" "vmla.f32 q12, q7, d6[0] \n" "vmla.f32 q13, q7, d6[1] \n" "vmla.f32 q14, q7, d7[0] \n" "vmla.f32 q15, q7, d7[1] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" "vstm %1!, {d24-d31} \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif } for (; i + 3 < tiles; i += 4) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] 
\n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19"); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q10, q4, d4[0] \n" "vmla.f32 q11, q4, d6[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q10, q5, d4[1] \n" "vmla.f32 q11, q5, d6[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "vmla.f32 q10, q6, d5[0] \n" "vmla.f32 q11, q6, d7[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "vmla.f32 q10, q7, d5[1] \n" "vmla.f32 q11, q7, d7[1] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif } for (; i + 1 < tiles; i += 2) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4s, v1.4s}, [%2], #32 \n" // r0 r1 "prfm pldl1keep, [%3, #512] \n" 
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v16", "v17"); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "0: \n" "pld [%2, #256] \n" "vld1.f32 {d0-d3}, [%2 :128]! \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "bne 0b \n" "vst1.f32 {d16-d19}, [%1 :128]! 
\n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9"); #endif } for (; i < tiles; i++) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "0: \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" // r0 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v16"); #else asm volatile( "veor q8, q8 \n" "0: \n" "pld [%2, #128] \n" "vld1.f32 {d0-d1}, [%2 :128]! \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q8, q5, d0[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q8, q7, d1[1] \n" "bne 0b \n" "vst1.f32 {d16-d17}, [%1 :128]! 
\n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8"); #endif } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { // const float otm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; float32x4_t _bias0 = bias ? 
vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f); float tmp[4][6][4]; // tile for (int i = 0; i < outh / 4; i++) { for (int j = 0; j < outw / 4; j++) { // top_blob_tm.create(tiles, 36, outch, elemsize, elempack); const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 6 + j) * 4; const float* output0_tm_1 = output0_tm_0 + tiles * 4; const float* output0_tm_2 = output0_tm_0 + tiles * 8; const float* output0_tm_3 = output0_tm_0 + tiles * 12; const float* output0_tm_4 = output0_tm_0 + tiles * 16; const float* output0_tm_5 = output0_tm_0 + tiles * 20; float* output0 = out0.row(i * 4) + (j * 4) * 4; // TODO neon optimize for (int m = 0; m < 6; m++) { float32x4_t _out0tm0 = vld1q_f32(output0_tm_0); float32x4_t _out0tm1 = vld1q_f32(output0_tm_1); float32x4_t _out0tm2 = vld1q_f32(output0_tm_2); float32x4_t _out0tm3 = vld1q_f32(output0_tm_3); float32x4_t _out0tm4 = vld1q_f32(output0_tm_4); float32x4_t _out0tm5 = vld1q_f32(output0_tm_5); float32x4_t _tmp02a = vaddq_f32(_out0tm1, _out0tm2); float32x4_t _tmp13a = vsubq_f32(_out0tm1, _out0tm2); float32x4_t _tmp02b = vaddq_f32(_out0tm3, _out0tm4); float32x4_t _tmp13b = vsubq_f32(_out0tm3, _out0tm4); float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp02a), _tmp02b); float32x4_t _tmp1m = vmlaq_n_f32(_tmp13a, _tmp13b, 2.f); float32x4_t _tmp2m = vmlaq_n_f32(_tmp02a, _tmp02b, 4.f); float32x4_t _tmp3m = vmlaq_n_f32(vaddq_f32(_out0tm5, _tmp13a), _tmp13b, 8.f); vst1q_f32(tmp[0][m], _tmp0m); vst1q_f32(tmp[1][m], _tmp1m); vst1q_f32(tmp[2][m], _tmp2m); vst1q_f32(tmp[3][m], _tmp3m); output0_tm_0 += tiles * 24; output0_tm_1 += tiles * 24; output0_tm_2 += tiles * 24; output0_tm_3 += tiles * 24; output0_tm_4 += tiles * 24; output0_tm_5 += tiles * 24; } for (int m = 0; m < 4; m++) { float32x4_t _tmp00 = vld1q_f32(tmp[m][0]); float32x4_t _tmp01 = vld1q_f32(tmp[m][1]); float32x4_t _tmp02 = vld1q_f32(tmp[m][2]); float32x4_t _tmp03 = vld1q_f32(tmp[m][3]); float32x4_t _tmp04 = vld1q_f32(tmp[m][4]); float32x4_t _tmp05 = 
vld1q_f32(tmp[m][5]);

                    // ---- tail of the winograd output transform (the enclosing
                    // function's definition starts above this chunk).
                    // Column pass of the inverse F(4x4,3x3) transform: 6 transformed
                    // values -> 4 output pixels, bias added here.
                    float32x4_t _tmp02a = vaddq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp13a = vsubq_f32(_tmp01, _tmp02);

                    float32x4_t _tmp02b = vaddq_f32(_tmp03, _tmp04);
                    float32x4_t _tmp13b = vsubq_f32(_tmp03, _tmp04);

                    float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp02a), _tmp02b));
                    float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp13a, _tmp13b, 2.f));
                    float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp02a, _tmp02b, 4.f));
                    float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vaddq_f32(_tmp05, _tmp13a), _tmp13b, 8.f));

                    vst1q_f32(output0, _out00);
                    vst1q_f32(output0 + 4, _out01);
                    vst1q_f32(output0 + 8, _out02);
                    vst1q_f32(output0 + 12, _out03);

                    output0 += outw * 4; // next output row of this 4x4 tile (pack4)
                }
            }
        }
    }
}
// END transform output

// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}

// Direct 3x3 stride-2 convolution for pack4 data layout (groups of 4 channels
// interleaved as float32x4 lanes, as evidenced by the 4-float loads/stores).
//
// bottom_blob : input feature maps, pack4
// top_blob    : output feature maps, pack4; must already be sized (outw/outh/outch read from it)
// kernel      : per-(outch, inch) weight data laid out for the asm loops; each inner
//               step walks kptr forward and rewinds it by 512 bytes (see "sub %4, %4, #512",
//               original comment: kptr -= 8 * 16)
// _bias       : optional per-output-channel bias (4 floats per pack4 channel), may be empty
//
// The output is pre-filled with the bias and then accumulated over input
// channels; the inner loops compute 4, 2 and finally 1 output pixel(s) per
// iteration using hand-written aarch64 / armv7 NEON assembly.
static void conv3x3s2_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // Advance from the end of one processed input row to the start of the next
    // row pair (stride 2 vertically), in packed floats:
    // (w - 2*outw) skips the horizontal remainder, + w skips one full row.
    const int tailstep = (w - 2 * outw + w) * 4;

    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);

        // Pre-fill the whole output channel with its bias (or zero); the
        // per-input-channel loops below only accumulate on top of it.
        float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
        out0.fill(_bias0);

        for (int q = 0; q < inch; q++)
        {
            float* outptr0 = out0.row(0);

            const Mat img0 = bottom_blob.channel(q);

            // Three consecutive input rows feeding the 3x3 window.
            const float* r0 = img0.row(0);
            const float* r1 = img0.row(1);
            const float* r2 = img0.row(2);

            const float* kptr = (const float*)kernel.channel(p).row(q);

            int i = 0;
            for (; i < outh; i++)
            {
                int j = 0;
                // Main loop: 4 output pixels per iteration.
                for (; j + 3 < outw; j += 4)
                {
#if __aarch64__
                    asm volatile(
                        "prfm pldl1keep, [%0, #512] \n"
                        "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0] \n" // sum0 sum1 sum2 sum3

                        "prfm pldl1keep, [%1, #512] \n"
                        "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" // r00 r01 r02 r03

                        "prfm pldl1keep, [%1, #512] \n"
                        "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" // r04 r05 r06 r07

                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"

                        "fmla v20.4s, v16.4s, v0.s[0] \n"
                        "fmla v21.4s, v16.4s, v2.s[0] \n"
                        "fmla v22.4s, v16.4s, v4.s[0] \n"
                        "fmla v23.4s, v16.4s, v6.s[0] \n"
                        "fmla v20.4s, v17.4s, v0.s[1] \n"
                        "fmla v21.4s, v17.4s, v2.s[1] \n"
                        "fmla v22.4s, v17.4s, v4.s[1] \n"
                        "fmla v23.4s, v17.4s, v6.s[1] \n"

                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"

                        "fmla v20.4s, v18.4s, v0.s[2] \n"
                        "fmla v21.4s, v18.4s, v2.s[2] \n"
                        "fmla v22.4s, v18.4s, v4.s[2] \n"
                        "fmla v23.4s, v18.4s, v6.s[2] \n"
                        "fmla v20.4s, v19.4s, v0.s[3] \n"
                        "fmla v21.4s, v19.4s, v2.s[3] \n"
                        "fmla v22.4s, v19.4s, v4.s[3] \n"
                        "fmla v23.4s, v19.4s, v6.s[3] \n"

                        "prfm pldl1keep, [%1, #128] \n"
                        "ld1 {v28.4s}, [%1] \n" // r08

                        "fmla v20.4s, v24.4s, v1.s[0] \n"
                        "fmla v21.4s, v24.4s, v3.s[0] \n"
                        "fmla v22.4s, v24.4s, v5.s[0] \n"
                        "fmla v23.4s, v24.4s, v7.s[0] \n"
                        "fmla v20.4s, v25.4s, v1.s[1] \n"
                        "fmla v21.4s, v25.4s, v3.s[1] \n"
                        "fmla v22.4s, v25.4s, v5.s[1] \n"
                        "fmla v23.4s, v25.4s, v7.s[1] \n"

                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"

                        "fmla v20.4s, v26.4s, v1.s[2] \n"
                        "fmla v21.4s, v26.4s, v3.s[2] \n"
                        "fmla v22.4s, v26.4s, v5.s[2] \n"
                        "fmla v23.4s, v26.4s, v7.s[2] \n"
                        "fmla v20.4s, v27.4s, v1.s[3] \n"
                        "fmla v21.4s, v27.4s, v3.s[3] \n"
                        "fmla v22.4s, v27.4s, v5.s[3] \n"
                        "fmla v23.4s, v27.4s, v7.s[3] \n"

                        "prfm pldl1keep, [%2, #512] \n"
                        "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%2], #64 \n" // r10 r11 r12 r13

                        "fmla v20.4s, v16.4s, v2.s[0] \n"
                        "fmla v21.4s, v16.4s, v4.s[0] \n"
                        "fmla v22.4s, v16.4s, v6.s[0] \n"
                        "fmla v23.4s, v16.4s, v28.s[0] \n"
                        "fmla v20.4s, v17.4s, v2.s[1] \n"
                        "fmla v21.4s, v17.4s, v4.s[1] \n"
                        "fmla v22.4s, v17.4s, v6.s[1] \n"
                        "fmla v23.4s, v17.4s, v28.s[1] \n"

                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"

                        "fmla v20.4s, v18.4s, v2.s[2] \n"
                        "fmla v21.4s, v18.4s, v4.s[2] \n"
                        "fmla v22.4s, v18.4s, v6.s[2] \n"
                        "fmla v23.4s, v18.4s, v28.s[2] \n"
                        "fmla v20.4s, v19.4s, v2.s[3] \n"
                        "fmla v21.4s, v19.4s, v4.s[3] \n"
                        "fmla v22.4s, v19.4s, v6.s[3] \n"
                        "fmla v23.4s, v19.4s, v28.s[3] \n"

                        "prfm pldl1keep, [%2, #512] \n"
                        "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2], #64 \n" // r14 r15 r16 r17

                        "fmla v20.4s, v24.4s, v8.s[0] \n"
                        "fmla v21.4s, v24.4s, v10.s[0] \n"
                        "fmla v22.4s, v24.4s, v12.s[0] \n"
                        "fmla v23.4s, v24.4s, v14.s[0] \n"
                        "fmla v20.4s, v25.4s, v8.s[1] \n"
                        "fmla v21.4s, v25.4s, v10.s[1] \n"
                        "fmla v22.4s, v25.4s, v12.s[1] \n"
                        "fmla v23.4s, v25.4s, v14.s[1] \n"

                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"

                        "fmla v20.4s, v26.4s, v8.s[2] \n"
                        "fmla v21.4s, v26.4s, v10.s[2] \n"
                        "fmla v22.4s, v26.4s, v12.s[2] \n"
                        "fmla v23.4s, v26.4s, v14.s[2] \n"
                        "fmla v20.4s, v27.4s, v8.s[3] \n"
                        "fmla v21.4s, v27.4s, v10.s[3] \n"
                        "fmla v22.4s, v27.4s, v12.s[3] \n"
                        "fmla v23.4s, v27.4s, v14.s[3] \n"

                        "prfm pldl1keep, [%2, #128] \n"
                        "ld1 {v28.4s}, [%2] \n" // r18

                        "fmla v20.4s, v16.4s, v9.s[0] \n"
                        "fmla v21.4s, v16.4s, v11.s[0] \n"
                        "fmla v22.4s, v16.4s, v13.s[0] \n"
                        "fmla v23.4s, v16.4s, v15.s[0] \n"
                        "fmla v20.4s, v17.4s, v9.s[1] \n"
                        "fmla v21.4s, v17.4s, v11.s[1] \n"
                        "fmla v22.4s, v17.4s, v13.s[1] \n"
                        "fmla v23.4s, v17.4s, v15.s[1] \n"

                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"

                        "fmla v20.4s, v18.4s, v9.s[2] \n"
                        "fmla v21.4s, v18.4s, v11.s[2] \n"
                        "fmla v22.4s, v18.4s, v13.s[2] \n"
                        "fmla v23.4s, v18.4s, v15.s[2] \n"
                        "fmla v20.4s, v19.4s, v9.s[3] \n"
                        "fmla v21.4s, v19.4s, v11.s[3] \n"
                        "fmla v22.4s, v19.4s, v13.s[3] \n"
                        "fmla v23.4s, v19.4s, v15.s[3] \n"

                        "prfm pldl1keep, [%3, #512] \n"
                        "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r20 r21 r22 r23

                        "fmla v20.4s, v24.4s, v10.s[0] \n"
                        "fmla v21.4s, v24.4s, v12.s[0] \n"
                        "fmla v22.4s, v24.4s, v14.s[0] \n"
                        "fmla v23.4s, v24.4s, v28.s[0] \n"
                        "fmla v20.4s, v25.4s, v10.s[1] \n"
                        "fmla v21.4s, v25.4s, v12.s[1] \n"
                        "fmla v22.4s, v25.4s, v14.s[1] \n"
                        "fmla v23.4s, v25.4s, v28.s[1] \n"

                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"

                        "fmla v20.4s, v26.4s, v10.s[2] \n"
                        "fmla v21.4s, v26.4s, v12.s[2] \n"
                        "fmla v22.4s, v26.4s, v14.s[2] \n"
                        "fmla v23.4s, v26.4s, v28.s[2] \n"
                        "fmla v20.4s, v27.4s, v10.s[3] \n"
                        "fmla v21.4s, v27.4s, v12.s[3] \n"
                        "fmla v22.4s, v27.4s, v14.s[3] \n"
                        "fmla v23.4s, v27.4s, v28.s[3] \n"

                        "prfm pldl1keep, [%3, #512] \n"
                        "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // r24 r25 r26 r27

                        "fmla v20.4s, v16.4s, v0.s[0] \n"
                        "fmla v21.4s, v16.4s, v2.s[0] \n"
                        "fmla v22.4s, v16.4s, v4.s[0] \n"
                        "fmla v23.4s, v16.4s, v6.s[0] \n"
                        "fmla v20.4s, v17.4s, v0.s[1] \n"
                        "fmla v21.4s, v17.4s, v2.s[1] \n"
                        "fmla v22.4s, v17.4s, v4.s[1] \n"
                        "fmla v23.4s, v17.4s, v6.s[1] \n"

                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"

                        "fmla v20.4s, v18.4s, v0.s[2] \n"
                        "fmla v21.4s, v18.4s, v2.s[2] \n"
                        "fmla v22.4s, v18.4s, v4.s[2] \n"
                        "fmla v23.4s, v18.4s, v6.s[2] \n"
                        "fmla v20.4s, v19.4s, v0.s[3] \n"
                        "fmla v21.4s, v19.4s, v2.s[3] \n"
                        "fmla v22.4s, v19.4s, v4.s[3] \n"
                        "fmla v23.4s, v19.4s, v6.s[3] \n"

                        "prfm pldl1keep, [%3, #128] \n"
                        "ld1 {v28.4s}, [%3] \n" // r28

                        "fmla v20.4s, v24.4s, v1.s[0] \n"
                        "fmla v21.4s, v24.4s, v3.s[0] \n"
                        "fmla v22.4s, v24.4s, v5.s[0] \n"
                        "fmla v23.4s, v24.4s, v7.s[0] \n"
                        "fmla v20.4s, v25.4s, v1.s[1] \n"
                        "fmla v21.4s, v25.4s, v3.s[1] \n"
                        "fmla v22.4s, v25.4s, v5.s[1] \n"
                        "fmla v23.4s, v25.4s, v7.s[1] \n"

                        // last kernel block: no post-increment, kptr is rewound below
                        // "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4] \n"

                        "fmla v20.4s, v26.4s, v1.s[2] \n"
                        "fmla v21.4s, v26.4s, v3.s[2] \n"
                        "fmla v22.4s, v26.4s, v5.s[2] \n"
                        "fmla v23.4s, v26.4s, v7.s[2] \n"
                        "fmla v20.4s, v27.4s, v1.s[3] \n"
                        "fmla v21.4s, v27.4s, v3.s[3] \n"
                        "fmla v22.4s, v27.4s, v5.s[3] \n"
                        "fmla v23.4s, v27.4s, v7.s[3] \n"

                        "fmla v20.4s, v16.4s, v2.s[0] \n"
                        "fmla v21.4s, v16.4s, v4.s[0] \n"
                        "fmla v22.4s, v16.4s, v6.s[0] \n"
                        "fmla v23.4s, v16.4s, v28.s[0] \n"
                        "fmla v20.4s, v17.4s, v2.s[1] \n"
                        "fmla v21.4s, v17.4s, v4.s[1] \n"
                        "fmla v22.4s, v17.4s, v6.s[1] \n"
                        "fmla v23.4s, v17.4s, v28.s[1] \n"
                        "fmla v20.4s, v18.4s, v2.s[2] \n"
                        "fmla v21.4s, v18.4s, v4.s[2] \n"
                        "fmla v22.4s, v18.4s, v6.s[2] \n"
                        "fmla v23.4s, v18.4s, v28.s[2] \n"
                        "fmla v20.4s, v19.4s, v2.s[3] \n"
                        "fmla v21.4s, v19.4s, v4.s[3] \n"
                        "fmla v22.4s, v19.4s, v6.s[3] \n"
                        "fmla v23.4s, v19.4s, v28.s[3] \n"

                        "sub %4, %4, #512 \n" // kptr -= 8 * 16;

                        "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0], #64 \n"

                        : "=r"(outptr0), // %0
                        "=r"(r0),        // %1
                        "=r"(r1),        // %2
                        "=r"(r2),        // %3
                        "=r"(kptr)       // %4
                        : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr)
                        : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28");
#else  // __aarch64__
                    asm volatile(
                        "pld [%0, #512] \n"
                        "vldm %0, {d24-d31} \n" // sum0 sum1 sum2 sum3

                        "pld [%1, #512] \n"
                        "vldm %1!, {d0-d7} \n" // r00 r01 r02 r03

                        "pld [%1, #512] \n"
                        "vldm %1!, {d8-d15} \n" // r04 r05 r06 r07

                        "pld [%4, #512] \n"
                        "vldm %4!, {d16-d23} \n"

                        "vmla.f32 q12, q8, d0[0] \n"
                        "vmla.f32 q13, q8, d4[0] \n"
                        "vmla.f32 q14, q8, d8[0] \n"
                        "vmla.f32 q15, q8, d12[0] \n"
                        "vmla.f32 q12, q9, d0[1] \n"
                        "vmla.f32 q13, q9, d4[1] \n"
                        "vmla.f32 q14, q9, d8[1] \n"
                        "vmla.f32 q15, q9, d12[1] \n"
                        "vmla.f32 q12, q10, d1[0] \n"
                        "vmla.f32 q13, q10, d5[0] \n"
                        "vmla.f32 q14, q10, d9[0] \n"
                        "vmla.f32 q15, q10, d13[0] \n"
                        "vmla.f32 q12, q11, d1[1] \n"
                        "vmla.f32 q13, q11, d5[1] \n"
                        "vmla.f32 q14, q11, d9[1] \n"
                        "vmla.f32 q15, q11, d13[1] \n"

                        "pld [%4, #512] \n"
                        "vldm %4!, {d16-d23} \n"

                        "pld [%1, #128] \n"
                        "vld1.f32 {d0-d1}, [%1 :128] \n" // r08

                        "vmla.f32 q12, q8, d2[0] \n"
                        "vmla.f32 q13, q8, d6[0] \n"
                        "vmla.f32 q14, q8, d10[0] \n"
                        "vmla.f32 q15, q8, d14[0] \n"
                        "vmla.f32 q12, q9, d2[1] \n"
                        "vmla.f32 q13, q9, d6[1] \n"
                        "vmla.f32 q14, q9, d10[1] \n"
                        "vmla.f32 q15, q9, d14[1] \n"
                        "vmla.f32 q12, q10, d3[0] \n"
                        "vmla.f32 q13, q10, d7[0] \n"
                        "vmla.f32 q14, q10, d11[0] \n"
                        "vmla.f32 q15, q10, d15[0] \n"
                        "vmla.f32 q12, q11, d3[1] \n"
                        "vmla.f32 q13, q11, d7[1] \n"
                        "vmla.f32 q14, q11, d11[1] \n"
                        "vmla.f32 q15, q11, d15[1] \n"

                        "pld [%4, #512] \n"
                        "vldm %4!, {d16-d23} \n"

                        "vmla.f32 q12, q8, d4[0] \n"
                        "vmla.f32 q13, q8, d8[0] \n"
                        "vmla.f32 q14, q8, d12[0] \n"
                        "vmla.f32 q15, q8, d0[0] \n"
                        "vmla.f32 q12, q9, d4[1] \n"
                        "vmla.f32 q13, q9, d8[1] \n"
                        "vmla.f32 q14, q9, d12[1] \n"
                        "vmla.f32 q15, q9, d0[1] \n"
                        "vmla.f32 q12, q10, d5[0] \n"
                        "vmla.f32 q13, q10, d9[0] \n"
                        "vmla.f32 q14, q10, d13[0] \n"
                        "vmla.f32 q15, q10, d1[0] \n"
                        "vmla.f32 q12, q11, d5[1] \n"
                        "vmla.f32 q13, q11, d9[1] \n"
                        "vmla.f32 q14, q11, d13[1] \n"
                        "vmla.f32 q15, q11, d1[1] \n"

                        "pld [%2, #512] \n"
                        "vldm %2!, {d8-d15} \n" // r10 r11 r12 r13

                        "pld [%2, #512] \n"
                        "vldm %2!, {d0-d7} \n" // r14 r15 r16 r17

                        "pld [%4, #512] \n"
                        "vldm %4!, {d16-d23} \n"

                        "vmla.f32 q12, q8, d8[0] \n"
                        "vmla.f32 q13, q8, d12[0] \n"
                        "vmla.f32 q14, q8, d0[0] \n"
                        "vmla.f32 q15, q8, d4[0] \n"
                        "vmla.f32 q12, q9, d8[1] \n"
                        "vmla.f32 q13, q9, d12[1] \n"
                        "vmla.f32 q14, q9, d0[1] \n"
                        "vmla.f32 q15, q9, d4[1] \n"
                        "vmla.f32 q12, q10, d9[0] \n"
                        "vmla.f32 q13, q10, d13[0] \n"
                        "vmla.f32 q14, q10, d1[0] \n"
                        "vmla.f32 q15, q10, d5[0] \n"
                        "vmla.f32 q12, q11, d9[1] \n"
                        "vmla.f32 q13, q11, d13[1] \n"
                        "vmla.f32 q14, q11, d1[1] \n"
                        "vmla.f32 q15, q11, d5[1] \n"

                        "pld [%4, #512] \n"
                        "vldm %4!, {d16-d23} \n"

                        "pld [%2, #128] \n"
                        "vld1.f32 {d8-d9}, [%2 :128] \n" // r18

                        "vmla.f32 q12, q8, d10[0] \n"
                        "vmla.f32 q13, q8, d14[0] \n"
                        "vmla.f32 q14, q8, d2[0] \n"
                        "vmla.f32 q15, q8, d6[0] \n"
                        "vmla.f32 q12, q9, d10[1] \n"
                        "vmla.f32 q13, q9, d14[1] \n"
                        "vmla.f32 q14, q9, d2[1] \n"
                        "vmla.f32 q15, q9, d6[1] \n"
                        "vmla.f32 q12, q10, d11[0] \n"
                        "vmla.f32 q13, q10, d15[0] \n"
                        "vmla.f32 q14, q10, d3[0] \n"
                        "vmla.f32 q15, q10, d7[0] \n"
                        "vmla.f32 q12, q11, d11[1] \n"
                        "vmla.f32 q13, q11, d15[1] \n"
                        "vmla.f32 q14, q11, d3[1] \n"
                        "vmla.f32 q15, q11, d7[1] \n"

                        "pld [%4, #512] \n"
                        "vldm %4!, {d16-d23} \n"

                        "vmla.f32 q12, q8, d12[0] \n"
                        "vmla.f32 q13, q8, d0[0] \n"
                        "vmla.f32 q14, q8, d4[0] \n"
                        "vmla.f32 q15, q8, d8[0] \n"
                        "vmla.f32 q12, q9, d12[1] \n"
                        "vmla.f32 q13, q9, d0[1] \n"
                        "vmla.f32 q14, q9, d4[1] \n"
                        "vmla.f32 q15, q9, d8[1] \n"
                        "vmla.f32 q12, q10, d13[0] \n"
                        "vmla.f32 q13, q10, d1[0] \n"
                        "vmla.f32 q14, q10, d5[0] \n"
                        "vmla.f32 q15, q10, d9[0] \n"
                        "vmla.f32 q12, q11, d13[1] \n"
                        "vmla.f32 q13, q11, d1[1] \n"
                        "vmla.f32 q14, q11, d5[1] \n"
                        "vmla.f32 q15, q11, d9[1] \n"

                        "pld [%3, #512] \n"
                        "vldm %3!, {d0-d7} \n" // r20 r21 r22 r23

                        "pld [%3, #512] \n"
                        "vldm %3!, {d8-d15} \n" // r24 r25 r26 r27

                        "pld [%4, #512] \n"
                        "vldm %4!, {d16-d23} \n"

                        "vmla.f32 q12, q8, d0[0] \n"
                        "vmla.f32 q13, q8, d4[0] \n"
                        "vmla.f32 q14, q8, d8[0] \n"
                        "vmla.f32 q15, q8, d12[0] \n"
                        "vmla.f32 q12, q9, d0[1] \n"
                        "vmla.f32 q13, q9, d4[1] \n"
                        "vmla.f32 q14, q9, d8[1] \n"
                        "vmla.f32 q15, q9, d12[1] \n"
                        "vmla.f32 q12, q10, d1[0] \n"
                        "vmla.f32 q13, q10, d5[0] \n"
                        "vmla.f32 q14, q10, d9[0] \n"
                        "vmla.f32 q15, q10, d13[0] \n"
                        "vmla.f32 q12, q11, d1[1] \n"
                        "vmla.f32 q13, q11, d5[1] \n"
                        "vmla.f32 q14, q11, d9[1] \n"
                        "vmla.f32 q15, q11, d13[1] \n"

                        "pld [%4, #512] \n"
                        "vldm %4!, {d16-d23} \n"

                        "pld [%3, #128] \n"
                        "vld1.f32 {d0-d1}, [%3 :128] \n" // r28

                        "vmla.f32 q12, q8, d2[0] \n"
                        "vmla.f32 q13, q8, d6[0] \n"
                        "vmla.f32 q14, q8, d10[0] \n"
                        "vmla.f32 q15, q8, d14[0] \n"
                        "vmla.f32 q12, q9, d2[1] \n"
                        "vmla.f32 q13, q9, d6[1] \n"
                        "vmla.f32 q14, q9, d10[1] \n"
                        "vmla.f32 q15, q9, d14[1] \n"
                        "vmla.f32 q12, q10, d3[0] \n"
                        "vmla.f32 q13, q10, d7[0] \n"
                        "vmla.f32 q14, q10, d11[0] \n"
                        "vmla.f32 q15, q10, d15[0] \n"
                        "vmla.f32 q12, q11, d3[1] \n"
                        "vmla.f32 q13, q11, d7[1] \n"
                        "vmla.f32 q14, q11, d11[1] \n"
                        "vmla.f32 q15, q11, d15[1] \n"

                        // last kernel block: no post-increment, kptr is rewound below
                        // "pld [%4, #512] \n"
                        "vldm %4, {d16-d23} \n"

                        "vmla.f32 q12, q8, d4[0] \n"
                        "vmla.f32 q13, q8, d8[0] \n"
                        "vmla.f32 q14, q8, d12[0] \n"
                        "vmla.f32 q15, q8, d0[0] \n"
                        "vmla.f32 q12, q9, d4[1] \n"
                        "vmla.f32 q13, q9, d8[1] \n"
                        "vmla.f32 q14, q9, d12[1] \n"
                        "vmla.f32 q15, q9, d0[1] \n"
                        "vmla.f32 q12, q10, d5[0] \n"
                        "vmla.f32 q13, q10, d9[0] \n"
                        "vmla.f32 q14, q10, d13[0] \n"
                        "vmla.f32 q15, q10, d1[0] \n"
                        "vmla.f32 q12, q11, d5[1] \n"
                        "vmla.f32 q13, q11, d9[1] \n"
                        "vmla.f32 q14, q11, d13[1] \n"
                        "vmla.f32 q15, q11, d1[1] \n"

                        "sub %4, %4, #512 \n" // kptr -= 8 * 16;

                        "vstm %0!, {d24-d31} \n"

                        : "=r"(outptr0), // %0
                        "=r"(r0),        // %1
                        "=r"(r1),        // %2
                        "=r"(r2),        // %3
                        "=r"(kptr)       // %4
                        : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr)
                        : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
                }
                // 2 output pixels per iteration.
                for (; j + 1 < outw; j += 2)
                {
#if __aarch64__
                    asm volatile(
                        "prfm pldl1keep, [%0, #256] \n"
                        "ld1 {v20.4s, v21.4s}, [%0] \n" // sum0 sum1

                        "prfm pldl1keep, [%1, #512] \n"
                        "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" // r00 r01 r02 r03

                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"

                        // v22/v23 start fresh partial sums (fmul), folded into v20/v21 at the end
                        "fmul v22.4s, v16.4s, v0.s[0] \n"
                        "fmul v23.4s, v16.4s, v2.s[0] \n"
                        "fmla v20.4s, v17.4s, v0.s[1] \n"
                        "fmla v21.4s, v17.4s, v2.s[1] \n"

                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"

                        "fmla v22.4s, v18.4s, v0.s[2] \n"
                        "fmla v23.4s, v18.4s, v2.s[2] \n"
                        "fmla v20.4s, v19.4s, v0.s[3] \n"
                        "fmla v21.4s, v19.4s, v2.s[3] \n"

                        "prfm pldl1keep, [%1, #128] \n"
                        "ld1 {v4.4s}, [%1] \n" // r04

                        "fmla v22.4s, v24.4s, v1.s[0] \n"
                        "fmla v23.4s, v24.4s, v3.s[0] \n"
                        "fmla v20.4s, v25.4s, v1.s[1] \n"
                        "fmla v21.4s, v25.4s, v3.s[1] \n"

                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"

                        "fmla v22.4s, v26.4s, v1.s[2] \n"
                        "fmla v23.4s, v26.4s, v3.s[2] \n"
                        "fmla v20.4s, v27.4s, v1.s[3] \n"
                        "fmla v21.4s, v27.4s, v3.s[3] \n"

                        "fmla v22.4s, v16.4s, v2.s[0] \n"
                        "fmla v23.4s, v16.4s, v4.s[0] \n"
                        "fmla v20.4s, v17.4s, v2.s[1] \n"
                        "fmla v21.4s, v17.4s, v4.s[1] \n"

                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"

                        "fmla v22.4s, v18.4s, v2.s[2] \n"
                        "fmla v23.4s, v18.4s, v4.s[2] \n"
                        "fmla v20.4s, v19.4s, v2.s[3] \n"
                        "fmla v21.4s, v19.4s, v4.s[3] \n"

                        "prfm pldl1keep, [%2, #512] \n"
                        "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r10 r11 r12 r13

                        "fmla v22.4s, v24.4s, v0.s[0] \n"
                        "fmla v23.4s, v24.4s, v2.s[0] \n"
                        "fmla v20.4s, v25.4s, v0.s[1] \n"
                        "fmla v21.4s, v25.4s, v2.s[1] \n"

                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"

                        "fmla v22.4s, v26.4s, v0.s[2] \n"
                        "fmla v23.4s, v26.4s, v2.s[2] \n"
                        "fmla v20.4s, v27.4s, v0.s[3] \n"
                        "fmla v21.4s, v27.4s, v2.s[3] \n"

                        "prfm pldl1keep, [%2, #128] \n"
                        "ld1 {v4.4s}, [%2] \n" // r14

                        "fmla v22.4s, v16.4s, v1.s[0] \n"
                        "fmla v23.4s, v16.4s, v3.s[0] \n"
                        "fmla v20.4s, v17.4s, v1.s[1] \n"
                        "fmla v21.4s, v17.4s, v3.s[1] \n"

                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"

                        "fmla v22.4s, v18.4s, v1.s[2] \n"
                        "fmla v23.4s, v18.4s, v3.s[2] \n"
                        "fmla v20.4s, v19.4s, v1.s[3] \n"
                        "fmla v21.4s, v19.4s, v3.s[3] \n"

                        "fmla v22.4s, v24.4s, v2.s[0] \n"
                        "fmla v23.4s, v24.4s, v4.s[0] \n"
                        "fmla v20.4s, v25.4s, v2.s[1] \n"
                        "fmla v21.4s, v25.4s, v4.s[1] \n"

                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"

                        "fmla v22.4s, v26.4s, v2.s[2] \n"
                        "fmla v23.4s, v26.4s, v4.s[2] \n"
                        "fmla v20.4s, v27.4s, v2.s[3] \n"
                        "fmla v21.4s, v27.4s, v4.s[3] \n"

                        "prfm pldl1keep, [%3, #512] \n"
                        "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r20 r21 r22 r23

                        "fmla v22.4s, v16.4s, v0.s[0] \n"
                        "fmla v23.4s, v16.4s, v2.s[0] \n"
                        "fmla v20.4s, v17.4s, v0.s[1] \n"
                        "fmla v21.4s, v17.4s, v2.s[1] \n"

                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"

                        "fmla v22.4s, v18.4s, v0.s[2] \n"
                        "fmla v23.4s, v18.4s, v2.s[2] \n"
                        "fmla v20.4s, v19.4s, v0.s[3] \n"
                        "fmla v21.4s, v19.4s, v2.s[3] \n"

                        "prfm pldl1keep, [%3, #128] \n"
                        "ld1 {v4.4s}, [%3] \n" // r24

                        "fmla v22.4s, v24.4s, v1.s[0] \n"
                        "fmla v23.4s, v24.4s, v3.s[0] \n"
                        "fmla v20.4s, v25.4s, v1.s[1] \n"
                        "fmla v21.4s, v25.4s, v3.s[1] \n"

                        // last kernel block: no post-increment, kptr is rewound below
                        // "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4] \n"

                        "fmla v22.4s, v26.4s, v1.s[2] \n"
                        "fmla v23.4s, v26.4s, v3.s[2] \n"
                        "fmla v20.4s, v27.4s, v1.s[3] \n"
                        "fmla v21.4s, v27.4s, v3.s[3] \n"

                        "fmla v22.4s, v16.4s, v2.s[0] \n"
                        "fmla v23.4s, v16.4s, v4.s[0] \n"
                        "fmla v20.4s, v17.4s, v2.s[1] \n"
                        "fmla v21.4s, v17.4s, v4.s[1] \n"
                        "fmla v22.4s, v18.4s, v2.s[2] \n"
                        "fmla v23.4s, v18.4s, v4.s[2] \n"
                        "fmla v20.4s, v19.4s, v2.s[3] \n"
                        "fmla v21.4s, v19.4s, v4.s[3] \n"

                        "fadd v20.4s, v20.4s, v22.4s \n"
                        "fadd v21.4s, v21.4s, v23.4s \n"

                        "sub %4, %4, #512 \n" // kptr -= 8 * 16;

                        "st1 {v20.4s, v21.4s}, [%0], #32 \n"

                        : "=r"(outptr0), // %0
                        "=r"(r0),        // %1
                        "=r"(r1),        // %2
                        "=r"(r2),        // %3
                        "=r"(kptr)       // %4
                        : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr)
                        : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
#else  // __aarch64__
                    asm volatile(
                        "pld [%0, #256] \n"
                        "vld1.f32 {d24-d27}, [%0 :128] \n" // sum0 sum1

                        "pld [%1, #512] \n"
                        "vldm %1!, {d0-d7} \n" // r00 r01 r02 r03

                        "pld [%4, #512] \n"
                        "vldm %4!, {d16-d23} \n"

                        // q14/q15 start fresh partial sums (vmul), folded into q12/q13 at the end
                        "vmul.f32 q14, q8, d0[0] \n"
                        "vmul.f32 q15, q8, d4[0] \n"
                        "vmla.f32 q12, q9, d0[1] \n"
                        "vmla.f32 q13, q9, d4[1] \n"
                        "vmla.f32 q14, q10, d1[0] \n"
                        "vmla.f32 q15, q10, d5[0] \n"
                        "vmla.f32 q12, q11, d1[1] \n"
                        "vmla.f32 q13, q11, d5[1] \n"

                        "pld [%4, #512] \n"
                        "vldm %4!, {d16-d23} \n"

                        "pld [%1, #128] \n"
                        "vld1.f32 {d8-d9}, [%1 :128] \n" // r04

                        "vmla.f32 q14, q8, d2[0] \n"
                        "vmla.f32 q15, q8, d6[0] \n"
                        "vmla.f32 q12, q9, d2[1] \n"
                        "vmla.f32 q13, q9, d6[1] \n"
                        "vmla.f32 q14, q10, d3[0] \n"
                        "vmla.f32 q15, q10, d7[0] \n"
                        "vmla.f32 q12, q11, d3[1] \n"
                        "vmla.f32 q13, q11, d7[1] \n"

                        "pld [%4, #512] \n"
                        "vldm %4!, {d16-d23} \n"

                        "vmla.f32 q14, q8, d4[0] \n"
                        "vmla.f32 q15, q8, d8[0] \n"
                        "vmla.f32 q12, q9, d4[1] \n"
                        "vmla.f32 q13, q9, d8[1] \n"
                        "vmla.f32 q14, q10, d5[0] \n"
                        "vmla.f32 q15, q10, d9[0] \n"
                        "vmla.f32 q12, q11, d5[1] \n"
                        "vmla.f32 q13, q11, d9[1] \n"

                        "pld [%2, #512] \n"
                        "vldm %2!, {d0-d7} \n" // r10 r11 r12 r13

                        "pld [%4, #512] \n"
                        "vldm %4!, {d16-d23} \n"

                        "vmla.f32 q14, q8, d0[0] \n"
                        "vmla.f32 q15, q8, d4[0] \n"
                        "vmla.f32 q12, q9, d0[1] \n"
                        "vmla.f32 q13, q9, d4[1] \n"
                        "vmla.f32 q14, q10, d1[0] \n"
                        "vmla.f32 q15, q10, d5[0] \n"
                        "vmla.f32 q12, q11, d1[1] \n"
                        "vmla.f32 q13, q11, d5[1] \n"

                        "pld [%4, #512] \n"
                        "vldm %4!, {d16-d23} \n"

                        "pld [%2, #128] \n"
                        "vld1.f32 {d8-d9}, [%2 :128] \n" // r14

                        "vmla.f32 q14, q8, d2[0] \n"
                        "vmla.f32 q15, q8, d6[0] \n"
                        "vmla.f32 q12, q9, d2[1] \n"
                        "vmla.f32 q13, q9, d6[1] \n"
                        "vmla.f32 q14, q10, d3[0] \n"
                        "vmla.f32 q15, q10, d7[0] \n"
                        "vmla.f32 q12, q11, d3[1] \n"
                        "vmla.f32 q13, q11, d7[1] \n"

                        "pld [%4, #512] \n"
                        "vldm %4!, {d16-d23} \n"

                        "vmla.f32 q14, q8, d4[0] \n"
                        "vmla.f32 q15, q8, d8[0] \n"
                        "vmla.f32 q12, q9, d4[1] \n"
                        "vmla.f32 q13, q9, d8[1] \n"
                        "vmla.f32 q14, q10, d5[0] \n"
                        "vmla.f32 q15, q10, d9[0] \n"
                        "vmla.f32 q12, q11, d5[1] \n"
                        "vmla.f32 q13, q11, d9[1] \n"

                        "pld [%3, #512] \n"
                        "vldm %3!, {d0-d7} \n" // r20 r21 r22 r23

                        "pld [%4, #512] \n"
                        "vldm %4!, {d16-d23} \n"

                        "vmla.f32 q14, q8, d0[0] \n"
                        "vmla.f32 q15, q8, d4[0] \n"
                        "vmla.f32 q12, q9, d0[1] \n"
                        "vmla.f32 q13, q9, d4[1] \n"
                        "vmla.f32 q14, q10, d1[0] \n"
                        "vmla.f32 q15, q10, d5[0] \n"
                        "vmla.f32 q12, q11, d1[1] \n"
                        "vmla.f32 q13, q11, d5[1] \n"

                        "pld [%4, #512] \n"
                        "vldm %4!, {d16-d23} \n"

                        "pld [%3, #128] \n"
                        "vld1.f32 {d8-d9}, [%3 :128] \n" // r24

                        "vmla.f32 q14, q8, d2[0] \n"
                        "vmla.f32 q15, q8, d6[0] \n"
                        "vmla.f32 q12, q9, d2[1] \n"
                        "vmla.f32 q13, q9, d6[1] \n"
                        "vmla.f32 q14, q10, d3[0] \n"
                        "vmla.f32 q15, q10, d7[0] \n"
                        "vmla.f32 q12, q11, d3[1] \n"
                        "vmla.f32 q13, q11, d7[1] \n"

                        // last kernel block: no post-increment, kptr is rewound below
                        // "pld [%4, #512] \n"
                        "vldm %4, {d16-d23} \n"

                        "vmla.f32 q14, q8, d4[0] \n"
                        "vmla.f32 q15, q8, d8[0] \n"
                        "vmla.f32 q12, q9, d4[1] \n"
                        "vmla.f32 q13, q9, d8[1] \n"
                        "vmla.f32 q14, q10, d5[0] \n"
                        "vmla.f32 q15, q10, d9[0] \n"
                        "vmla.f32 q12, q11, d5[1] \n"
                        "vmla.f32 q13, q11, d9[1] \n"

                        "vadd.f32 q12, q12, q14 \n"
                        "vadd.f32 q13, q13, q15 \n"

                        "sub %4, %4, #512 \n" // kptr -= 8 * 16;

                        "vst1.f32 {d24-d27}, [%0 :128]! \n"

                        : "=r"(outptr0), // %0
                        "=r"(r0),        // %1
                        "=r"(r1),        // %2
                        "=r"(r2),        // %3
                        "=r"(kptr)       // %4
                        : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr)
                        : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
                }
                // Scalar tail: 1 output pixel per iteration; input pointers are
                // advanced by explicit "add %n, %n, #32" (2 packed pixels, stride 2).
                for (; j < outw; j++)
                {
#if __aarch64__
                    asm volatile(
                        "prfm pldl1keep, [%0, #128] \n"
                        "ld1 {v20.4s}, [%0] \n" // sum0

                        "prfm pldl1keep, [%1, #384] \n"
                        "ld1 {v0.4s, v1.4s, v2.4s}, [%1] \n" // r00 r01 r02

                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"

                        // v21/v22/v23 are fresh partial sums (fmul), folded into v20 at the end
                        "fmul v21.4s, v16.4s, v0.s[0] \n"
                        "fmul v22.4s, v17.4s, v0.s[1] \n"

                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"

                        "fmul v23.4s, v18.4s, v0.s[2] \n"
                        "fmla v20.4s, v19.4s, v0.s[3] \n"

                        "fmla v21.4s, v24.4s, v1.s[0] \n"
                        "fmla v22.4s, v25.4s, v1.s[1] \n"

                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"

                        "fmla v23.4s, v26.4s, v1.s[2] \n"
                        "fmla v20.4s, v27.4s, v1.s[3] \n"

                        "prfm pldl1keep, [%2, #384] \n"
                        "ld1 {v3.4s, v4.4s, v5.4s}, [%2] \n" // r10 r11 r12

                        "fmla v21.4s, v16.4s, v2.s[0] \n"
                        "fmla v22.4s, v17.4s, v2.s[1] \n"

                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"

                        "fmla v23.4s, v18.4s, v2.s[2] \n"
                        "fmla v20.4s, v19.4s, v2.s[3] \n"

                        "fmla v21.4s, v24.4s, v3.s[0] \n"
                        "fmla v22.4s, v25.4s, v3.s[1] \n"

                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"

                        "fmla v23.4s, v26.4s, v3.s[2] \n"
                        "fmla v20.4s, v27.4s, v3.s[3] \n"

                        "fmla v21.4s, v16.4s, v4.s[0] \n"
                        "fmla v22.4s, v17.4s, v4.s[1] \n"

                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"

                        "fmla v23.4s, v18.4s, v4.s[2] \n"
                        "fmla v20.4s, v19.4s, v4.s[3] \n"

                        "prfm pldl1keep, [%3, #384] \n"
                        "ld1 {v0.4s, v1.4s, v2.4s}, [%3] \n" // r20 r21 r22

                        "fmla v21.4s, v24.4s, v5.s[0] \n"
                        "fmla v22.4s, v25.4s, v5.s[1] \n"

                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"

                        "fmla v23.4s, v26.4s, v5.s[2] \n"
                        "fmla v20.4s, v27.4s, v5.s[3] \n"

                        "fmla v21.4s, v16.4s, v0.s[0] \n"
                        "fmla v22.4s, v17.4s, v0.s[1] \n"

                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"

                        "fmla v23.4s, v18.4s, v0.s[2] \n"
                        "fmla v20.4s, v19.4s, v0.s[3] \n"

                        "fmla v21.4s, v24.4s, v1.s[0] \n"
                        "fmla v22.4s, v25.4s, v1.s[1] \n"

                        // last kernel block: no post-increment, kptr is rewound below
                        // "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4] \n"

                        "fmla v23.4s, v26.4s, v1.s[2] \n"
                        "fmla v20.4s, v27.4s, v1.s[3] \n"

                        "fmla v21.4s, v16.4s, v2.s[0] \n"
                        "fmla v22.4s, v17.4s, v2.s[1] \n"
                        "fmla v23.4s, v18.4s, v2.s[2] \n"
                        "fmla v20.4s, v19.4s, v2.s[3] \n"

                        "add %1, %1, #32 \n"

                        "fadd v22.4s, v21.4s, v22.4s \n"

                        "add %2, %2, #32 \n"

                        "fadd v23.4s, v23.4s, v22.4s \n"

                        "add %3, %3, #32 \n"

                        "fadd v20.4s, v20.4s, v23.4s \n"

                        "sub %4, %4, #512 \n" // kptr -= 8 * 16;

                        "st1 {v20.4s}, [%0], #16 \n"

                        : "=r"(outptr0), // %0
                        "=r"(r0),        // %1
                        "=r"(r1),        // %2
                        "=r"(r2),        // %3
                        "=r"(kptr)       // %4
                        : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr)
                        : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
#else  // __aarch64__
                    asm volatile(
                        "pld [%0, #128] \n"
                        "vld1.f32 {d24-d25}, [%0 :128] \n" // sum0

                        "pld [%1, #384] \n"
                        "vldm %1, {d0-d5} \n" // r00 r01 r02

                        "pld [%4, #512] \n"
                        "vldm %4!, {d16-d23} \n"

                        // q13/q14/q15 are fresh partial sums (vmul), folded into q12 at the end
                        "vmul.f32 q13, q8, d0[0] \n"
                        "vmul.f32 q14, q9, d0[1] \n"
                        "vmul.f32 q15, q10, d1[0] \n"
                        "vmla.f32 q12, q11, d1[1] \n"

                        "pld [%4, #512] \n"
                        "vldm %4!, {d16-d23} \n"

                        "vmla.f32 q13, q8, d2[0] \n"
                        "vmla.f32 q14, q9, d2[1] \n"
                        "vmla.f32 q15, q10, d3[0] \n"
                        "vmla.f32 q12, q11, d3[1] \n"

                        "pld [%4, #512] \n"
                        "vldm %4!, {d16-d23} \n"

                        "vmla.f32 q13, q8, d4[0] \n"
                        "vmla.f32 q14, q9, d4[1] \n"
                        "vmla.f32 q15, q10, d5[0] \n"
                        "vmla.f32 q12, q11, d5[1] \n"

                        "pld [%2, #384] \n"
                        "vldm %2, {d0-d5} \n" // r10 r11 r12

                        "pld [%4, #512] \n"
                        "vldm %4!, {d16-d23} \n"

                        "vmla.f32 q13, q8, d0[0] \n"
                        "vmla.f32 q14, q9, d0[1] \n"
                        "vmla.f32 q15, q10, d1[0] \n"
                        "vmla.f32 q12, q11, d1[1] \n"

                        "pld [%4, #512] \n"
                        "vldm %4!, {d16-d23} \n"

                        "vmla.f32 q13, q8, d2[0] \n"
                        "vmla.f32 q14, q9, d2[1] \n"
                        "vmla.f32 q15, q10, d3[0] \n"
                        "vmla.f32 q12, q11, d3[1] \n"

                        "pld [%4, #512] \n"
                        "vldm %4!, {d16-d23} \n"

                        "vmla.f32 q13, q8, d4[0] \n"
                        "vmla.f32 q14, q9, d4[1] \n"
                        "vmla.f32 q15, q10, d5[0] \n"
                        "vmla.f32 q12, q11, d5[1] \n"

                        "pld [%3, #384] \n"
                        "vldm %3, {d0-d5} \n" // r20 r21 r22

                        "pld [%4, #512] \n"
                        "vldm %4!, {d16-d23} \n"

                        "vmla.f32 q13, q8, d0[0] \n"
                        "vmla.f32 q14, q9, d0[1] \n"
                        "vmla.f32 q15, q10, d1[0] \n"
                        "vmla.f32 q12, q11, d1[1] \n"

                        "pld [%4, #512] \n"
                        "vldm %4!, {d16-d23} \n"

                        "vmla.f32 q13, q8, d2[0] \n"
                        "vmla.f32 q14, q9, d2[1] \n"
                        "vmla.f32 q15, q10, d3[0] \n"
                        "vmla.f32 q12, q11, d3[1] \n"

                        // last kernel block: no post-increment, kptr is rewound below
                        // "pld [%4, #512] \n"
                        "vldm %4, {d16-d23} \n"

                        "vmla.f32 q13, q8, d4[0] \n"
                        "vmla.f32 q14, q9, d4[1] \n"
                        "vmla.f32 q15, q10, d5[0] \n"
                        "vmla.f32 q12, q11, d5[1] \n"

                        "vadd.f32 q14, q14, q13 \n"

                        "add %1, %1, #32 \n"

                        "vadd.f32 q15, q15, q14 \n"

                        "add %2, %2, #32 \n"

                        "vadd.f32 q12, q12, q15 \n"

                        "add %3, %3, #32 \n"

                        "sub %4, %4, #512 \n" // kptr -= 8 * 16;

                        "vst1.f32 {d24-d25}, [%0 :128]! \n"

                        : "=r"(outptr0), // %0
                        "=r"(r0),        // %1
                        "=r"(r1),        // %2
                        "=r"(r2),        // %3
                        "=r"(kptr)       // %4
                        : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr)
                        : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
                }

                // Skip to the next stride-2 row pair of the input.
                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }
        }
    }
}
// density.h
// Copyright (c) 2013-2017 Anton Kozhevnikov, Thomas Schulthess // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that // the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the // following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions // and the following disclaimer in the documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /** \file density.h * * \brief Contains definition and partial implementation of sirius::Density class. 
*/ #ifndef __DENSITY_H__ #define __DENSITY_H__ #include "periodic_function.h" #include "k_point_set.h" #include "simulation_context.h" #include "mixer.h" #ifdef __GPU extern "C" void generate_dm_pw_gpu(int num_atoms__, int num_gvec_loc__, int num_beta__, double const* atom_pos__, int const* gvec__, double* phase_factors__, double const* dm__, double* dm_pw__, int stream_id__); extern "C" void sum_q_pw_dm_pw_gpu(int num_gvec_loc__, int nbf__, double const* q_pw__, double const* dm_pw__, double const* sym_weight__, double_complex* rho_pw__, int stream_id__); extern "C" void update_density_rg_1_gpu(int size__, double_complex const* psi_rg__, double wt__, double* density_rg__); extern "C" void update_density_rg_2_gpu(int size__, double_complex const* psi_rg_up__, double_complex const* psi_rg_dn__, double wt__, double* density_x_rg__, double* density_y_rg__); #endif namespace sirius { /// Generate charge density and magnetization from occupied spinor wave-functions. /** Let's start from the definition of the complex density matrix: * \f[ * \rho_{\sigma' \sigma}({\bf r}) = * \sum_{j{\bf k}} n_{j{\bf k}} \Psi_{j{\bf k}}^{\sigma*}({\bf r}) \Psi_{j{\bf k}}^{\sigma'}({\bf r}) = * \frac{1}{2} \left( \begin{array}{cc} \rho({\bf r})+m_z({\bf r}) & * m_x({\bf r})-im_y({\bf r}) \\ m_x({\bf r})+im_y({\bf r}) & \rho({\bf r})-m_z({\bf r}) \end{array} \right) * \f] * We notice that the diagonal components of the density matrix are actually real and the off-diagonal components are * expressed trough two independent functions \f$ m_x({\bf r}) \f$ and \f$ m_y({\bf r}) \f$. 
Having this in mind we * will work with a slightly different object, namely a real density matrix, defined as a 1-, 2- or 4-dimensional * (depending on the number of magnetic components) vector with the following elements: * - \f$ [ \rho({\bf r}) ] \f$ in case of non-magnetic configuration * - \f$ [ \rho_{\uparrow \uparrow}({\bf r}), \rho_{\downarrow \downarrow}({\bf r}) ] = * [ \frac{\rho({\bf r})+m_z({\bf r})}{2}, \frac{\rho({\bf r})-m_z({\bf r})}{2} ] \f$ in case of collinear * magnetic configuration * - \f$ [ \rho_{\uparrow \uparrow}({\bf r}), \rho_{\downarrow \downarrow}({\bf r}), * 2 \Re \rho_{\uparrow \downarrow}({\bf r}), -2 \Im \rho_{\uparrow \downarrow}({\bf r}) ] = * [ \frac{\rho({\bf r})+m_z({\bf r})}{2}, \frac{\rho({\bf r})-m_z({\bf r})}{2}, * m_x({\bf r}), m_y({\bf r}) ] \f$ in the general case of non-collinear magnetic configuration * * At this point it is straightforward to compute the density and magnetization in the interstitial (see add_k_point_contribution_rg()). * The muffin-tin part of the density and magnetization is obtained in a slightly more complicated way.
Recall the * expansion of spinor wave-functions inside the muffin-tin \f$ \alpha \f$ * \f[ * \Psi_{j{\bf k}}^{\sigma}({\bf r}) = \sum_{\xi}^{N_{\xi}^{\alpha}} {S_{\xi}^{\sigma j {\bf k},\alpha}} * f_{\ell_{\xi} \lambda_{\xi}}^{\alpha}(r)Y_{\ell_{\xi}m_{\xi}}(\hat {\bf r}) * \f] * which we insert into expression for the complex density matrix: * \f[ * \rho_{\sigma' \sigma}({\bf r}) = \sum_{j{\bf k}} n_{j{\bf k}} \sum_{\xi}^{N_{\xi}^{\alpha}} * S_{\xi}^{\sigma j {\bf k},\alpha*} f_{\ell_{\xi} \lambda_{\xi}}^{\alpha}(r) * Y_{\ell_{\xi}m_{\xi}}^{*}(\hat {\bf r}) \sum_{\xi'}^{N_{\xi'}^{\alpha}} S_{\xi'}^{\sigma' j{\bf k},\alpha} * f_{\ell_{\xi'} \lambda_{\xi'}}^{\alpha}(r)Y_{\ell_{\xi'}m_{\xi'}}(\hat {\bf r}) * \f] * First, we eliminate a sum over bands and k-points by forming an auxiliary density tensor: * \f[ * D_{\xi \sigma, \xi' \sigma'}^{\alpha} = \sum_{j{\bf k}} n_{j{\bf k}} S_{\xi}^{\sigma j {\bf k},\alpha*} * S_{\xi'}^{\sigma' j {\bf k},\alpha} * \f] * The expression for complex density matrix simplifies to: * \f[ * \rho_{\sigma' \sigma}({\bf r}) = \sum_{\xi \xi'} D_{\xi \sigma, \xi' \sigma'}^{\alpha} * f_{\ell_{\xi} \lambda_{\xi}}^{\alpha}(r)Y_{\ell_{\xi}m_{\xi}}^{*}(\hat {\bf r}) * f_{\ell_{\xi'} \lambda_{\xi'}}^{\alpha}(r)Y_{\ell_{\xi'}m_{\xi'}}(\hat {\bf r}) * \f] * Now we can switch to the real density matrix and write its' expansion in real spherical harmonics. 
Let's take * non-magnetic case as an example: * \f[ * \rho({\bf r}) = \sum_{\xi \xi'} D_{\xi \xi'}^{\alpha} * f_{\ell_{\xi} \lambda_{\xi}}^{\alpha}(r)Y_{\ell_{\xi}m_{\xi}}^{*}(\hat {\bf r}) * f_{\ell_{\xi'} \lambda_{\xi'}}^{\alpha}(r)Y_{\ell_{\xi'}m_{\xi'}}(\hat {\bf r}) = * \sum_{\ell_3 m_3} \rho_{\ell_3 m_3}^{\alpha}(r) R_{\ell_3 m_3}(\hat {\bf r}) * \f] * where * \f[ * \rho_{\ell_3 m_3}^{\alpha}(r) = \sum_{\xi \xi'} D_{\xi \xi'}^{\alpha} f_{\ell_{\xi} \lambda_{\xi}}^{\alpha}(r) * f_{\ell_{\xi'} \lambda_{\xi'}}^{\alpha}(r) \langle Y_{\ell_{\xi}m_{\xi}} | R_{\ell_3 m_3} | Y_{\ell_{\xi'}m_{\xi'}} \rangle * \f] * We are almost done. Now it is time to switch to the full index notation \f$ \xi \rightarrow \{ \ell \lambda m \} \f$ * and sum over \a m and \a m' indices: * \f[ * \rho_{\ell_3 m_3}^{\alpha}(r) = \sum_{\ell \lambda, \ell' \lambda'} f_{\ell \lambda}^{\alpha}(r) * f_{\ell' \lambda'}^{\alpha}(r) d_{\ell \lambda, \ell' \lambda', \ell_3 m_3}^{\alpha} * \f] * where * \f[ * d_{\ell \lambda, \ell' \lambda', \ell_3 m_3}^{\alpha} = * \sum_{mm'} D_{\ell \lambda m, \ell' \lambda' m'}^{\alpha} * \langle Y_{\ell m} | R_{\ell_3 m_3} | Y_{\ell' m'} \rangle * \f] * This is our final answer: radial components of density and magnetization are expressed as a linear combination of * quadratic forms in radial functions. * * \note density and potential are allocated as global function because it's easier to load and save them. */ class Density { private: /// Context of the simulation. Simulation_context& ctx_; /// Alias to ctx_.unit_cell() Unit_cell& unit_cell_; /// Density matrix for all atoms. 
    mdarray<double_complex, 4> density_matrix_; // TODO: make it local for LAPW

    /// Per-atom storage of the local PAW densities.
    struct paw_density_data_t
    {
        /// Non-owning pointer to the corresponding atom.
        Atom* atom_{nullptr};

        /// Index of the atom; -1 means "not set".
        int ia{-1};

        /// ae and ps local unified densities+magnetization
        /** All-electron (ae) and pseudo (ps) parts, one Spheric_function per component. */
        std::vector<Spheric_function<spectral, double>> ae_density_;
        std::vector<Spheric_function<spectral, double>> ps_density_;
    };

    /// PAW density data; presumably one entry per locally treated PAW atom — confirm against init_paw().
    std::vector<paw_density_data_t> paw_density_data_;

    /// Pointer to charge density.
    /** In the case of full-potential calculation this is the full (valence + core) electron charge density.
     *  In the case of pseudopotential this is the valence charge density. */
    std::unique_ptr<Periodic_function<double>> rho_{nullptr};

    /// Magnetization.
    /** Only the first num_mag_dims() components are allocated (see the constructor). */
    std::array<std::unique_ptr<Periodic_function<double>>, 3> magnetization_;

    /// Alias for density and magnetization.
    /** Element 0 aliases rho_, elements 1..3 alias the magnetization components (set in the constructor). */
    std::array<Periodic_function<double>*, 4> rho_vec_{{nullptr, nullptr, nullptr, nullptr}};

    /// Density and magnetization on the coarse FFT mesh.
    /** Coarse FFT grid is enough to generate density and magnetization from the wave-functions. The components
     *  of the <tt>rho_mag_coarse</tt> vector have the following order:
     *  \f$ \{\rho({\bf r}), m_z({\bf r}), m_x({\bf r}), m_y({\bf r}) \} \f$. */
    std::array<std::unique_ptr<Smooth_periodic_function<double>>, 4> rho_mag_coarse_;

    /// Pointer to pseudo core charge density.
    /** In the case of pseudopotential we need to know the non-linear core correction to the
     *  exchange-correlation energy which is introduced through the pseudo core density:
     *  \f$ E_{xc}[\rho_{val} + \rho_{core}] \f$. The 'pseudo' reflects the fact that
     *  this density integrated does not reproduce the total number of core electrons. */
    std::unique_ptr<Smooth_periodic_function<double>> rho_pseudo_core_{nullptr};

    /// Non-zero Gaunt coefficients.
    std::unique_ptr<Gaunt_coefficients<double_complex>> gaunt_coefs_{nullptr};

    /// Fast mapping between composite lm index and corresponding orbital quantum number.
    mdarray<int, 1> l_by_lm_;

    /// High-frequency mixer for the pseudopotential density mixing.
    std::unique_ptr<Mixer<double_complex>> hf_mixer_{nullptr};

    /// Low-frequency mixer for the pseudopotential density mixing.
    std::unique_ptr<Mixer<double_complex>> lf_mixer_{nullptr};

    /// Mixer for the full-potential density mixing.
    std::unique_ptr<Mixer<double>> mixer_{nullptr};

    /// List of local low-frequency G-vectors.
    std::vector<int> lf_gvec_;

    /// List of local high-frequency G-vectors.
    std::vector<int> hf_gvec_;

    /// Weights of local low-frequency G-vectors.
    std::vector<double> lf_gvec_weights_;

    /// Allocate PAW data.
    void init_paw();

    /// Generate the local PAW density of one atom.
    void generate_paw_atom_density(paw_density_data_t &pdd);

    /// Initialize \rho_{ij} - density matrix, occupation on basis of beta-projectors (used for PAW).
    void init_density_matrix_for_paw();

    /// Reduce complex density matrix over magnetic quantum numbers.
    /** The following operation is performed:
     *  \f[
     *      n_{\ell \lambda, \ell' \lambda', \ell_3 m_3}^{\alpha} =
     *          \sum_{mm'} D_{\ell \lambda m, \ell' \lambda' m'}^{\alpha}
     *          \langle Y_{\ell m} | R_{\ell_3 m_3} | Y_{\ell' m'} \rangle
     *  \f]
     *  Only the lower triangle (idxrf1 <= idxrf2) of radial-function pairs is stored, packed into
     *  the second index of mt_density_matrix__.
     */
    template <int num_mag_dims>
    void reduce_density_matrix(Atom_type const& atom_type__,
                               int ia__,
                               mdarray<double_complex, 4> const& zdens__,
                               Gaunt_coefficients<double_complex> const& gaunt_coeffs__,
                               mdarray<double, 3>& mt_density_matrix__)
    {
        mt_density_matrix__.zero();

        #pragma omp parallel for default(shared)
        for (int idxrf2 = 0; idxrf2 < atom_type__.mt_radial_basis_size(); idxrf2++) {
            int l2 = atom_type__.indexr(idxrf2).l;
            for (int idxrf1 = 0; idxrf1 <= idxrf2; idxrf1++) {
                /* packed (triangular) index of the radial-function pair */
                int offs = idxrf2 * (idxrf2 + 1) / 2 + idxrf1;
                int l1 = atom_type__.indexr(idxrf1).l;

                /* position of the first basis function of each radial function in the composite index */
                int xi2 = atom_type__.indexb().index_by_idxrf(idxrf2);
                for (int lm2 = Utils::lm_by_l_m(l2, -l2); lm2 <= Utils::lm_by_l_m(l2, l2); lm2++, xi2++) {
                    int xi1 = atom_type__.indexb().index_by_idxrf(idxrf1);
                    for (int lm1 = Utils::lm_by_l_m(l1, -l1); lm1 <= Utils::lm_by_l_m(l1, l1); lm1++, xi1++) {
                        /* sum over non-zero Gaunt coefficients for this (lm1, lm2) pair */
                        for (int k = 0; k < gaunt_coeffs__.num_gaunt(lm1, lm2); k++) {
                            int lm3 = gaunt_coeffs__.gaunt(lm1, lm2, k).lm3;
                            auto gc = gaunt_coeffs__.gaunt(lm1, lm2, k).coef;
                            /* num_mag_dims is a compile-time constant; the fallthrough below is
                               intentional: case 3 also executes cases 1 and 0, case 1 also executes case 0 */
                            switch (num_mag_dims) {
                                case 3: {
                                    mt_density_matrix__(lm3, offs, 2) += 2.0 * std::real(zdens__(xi1, xi2, 2, ia__) * gc);
                                    mt_density_matrix__(lm3, offs, 3) -= 2.0 * std::imag(zdens__(xi1, xi2, 2, ia__) * gc);
                                }
                                /* no break */
                                case 1: {
                                    mt_density_matrix__(lm3, offs, 1) += std::real(zdens__(xi1, xi2, 1, ia__) * gc);
                                }
                                /* no break */
                                case 0: {
                                    mt_density_matrix__(lm3, offs, 0) += std::real(zdens__(xi1, xi2, 0, ia__) * gc);
                                }
                            }
                        }
                    }
                }
            }
        }
    }

    /// Add k-point contribution to the density matrix in the canonical form.
    /** In case of full-potential LAPW complex density matrix has the following expression:
     *  \f[
     *      d_{\xi \sigma, \xi' \sigma'}^{\alpha} = \sum_{j{\bf k}} n_{j{\bf k}}
     *          S_{\xi}^{\sigma j {\bf k},\alpha*} S_{\xi'}^{\sigma' j {\bf k},\alpha}
     *  \f]
     *
     *  where \f$ S_{\xi}^{\sigma j {\bf k},\alpha} \f$ are the expansion coefficients of
     *  spinor wave functions inside muffin-tin spheres.
     *
     *  In case of LDA+U the occupation matrix is also computed. It has the following expression:
     *  \f[
     *      n_{\ell,mm'}^{\sigma \sigma'} = \sum_{i {\bf k}}^{occ} \int_{0}^{R_{MT}} r^2 dr
     *          \Psi_{\ell m}^{i{\bf k}\sigma *}({\bf r}) \Psi_{\ell m'}^{i{\bf k}\sigma'}({\bf r})
     *  \f]
     *
     *  In case of ultrasoft pseudopotential the following density matrix has to be computed for each atom:
     *  \f[
     *      d_{\xi \xi'}^{\alpha} = \langle \beta_{\xi}^{\alpha} | \hat N | \beta_{\xi'}^{\alpha} \rangle =
     *        \sum_{j {\bf k}} \langle \beta_{\xi}^{\alpha} | \Psi_{j{\bf k}} \rangle n_{j{\bf k}}
     *        \langle \Psi_{j{\bf k}} | \beta_{\xi'}^{\alpha} \rangle
     *  \f]
     *  Here \f$ \hat N = \sum_{j{\bf k}} | \Psi_{j{\bf k}} \rangle n_{j{\bf k}} \langle \Psi_{j{\bf k}} | \f$ is
     *  the occupancy operator written in spectral representation.
     */
    template <typename T>
    inline void add_k_point_contribution_dm(K_point* kp__, mdarray<double_complex, 4>& density_matrix__);

    /// Add k-point contribution to the density and magnetization defined on the regular FFT grid.
inline void add_k_point_contribution_rg(K_point* kp__); /// Generate valence density in the muffin-tins void generate_valence_mt(K_point_set& ks); /// Generate charge density of core states void generate_core_charge_density() { PROFILE("sirius::Density::generate_core_charge_density"); for (int icloc = 0; icloc < unit_cell_.spl_num_atom_symmetry_classes().local_size(); icloc++) { int ic = unit_cell_.spl_num_atom_symmetry_classes(icloc); unit_cell_.atom_symmetry_class(ic).generate_core_charge_density(ctx_.core_relativity()); } for (int ic = 0; ic < unit_cell_.num_atom_symmetry_classes(); ic++) { int rank = unit_cell_.spl_num_atom_symmetry_classes().local_rank(ic); unit_cell_.atom_symmetry_class(ic).sync_core_charge_density(ctx_.comm(), rank); } } void generate_pseudo_core_charge_density() { PROFILE("sirius::Density::generate_pseudo_core_charge_density"); auto ri = Radial_integrals_rho_core_pseudo<false>(ctx_.unit_cell(), ctx_.pw_cutoff(), ctx_.settings().nprii_rho_core_); auto v = ctx_.make_periodic_function<index_domain_t::local>([&ri](int iat, double g) { return ri.value<int>(iat, g); }); std::copy(v.begin(), v.end(), &rho_pseudo_core_->f_pw_local(0)); rho_pseudo_core_->fft_transform(1); } public: /// Constructor Density(Simulation_context& ctx__) : ctx_(ctx__) , unit_cell_(ctx_.unit_cell()) { /* allocate charge density */ rho_ = std::unique_ptr<Periodic_function<double>>(new Periodic_function<double>(ctx_, ctx_.lmmax_rho())); rho_vec_[0] = rho_.get(); /* allocate magnetization density */ for (int i = 0; i < ctx_.num_mag_dims(); i++) { magnetization_[i] = std::unique_ptr<Periodic_function<double>>(new Periodic_function<double>(ctx_, ctx_.lmmax_rho())); rho_vec_[i + 1] = magnetization_[i].get(); } /* allocate charge density and magnetization on a coarse grid */ for (int i = 0; i < ctx_.num_mag_dims() + 1; i++) { rho_mag_coarse_[i] = std::unique_ptr<Smooth_periodic_function<double>>(new Smooth_periodic_function<double>(ctx_.fft_coarse(), 
ctx_.gvec_coarse_partition())); } /* core density of the pseudopotential method */ if (!ctx_.full_potential()) { rho_pseudo_core_ = std::unique_ptr<Smooth_periodic_function<double>>(new Smooth_periodic_function<double>(ctx_.fft(), ctx_.gvec_partition())); rho_pseudo_core_->zero(); bool is_empty{true}; for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) { is_empty &= unit_cell_.atom_type(iat).ps_core_charge_density().empty(); } if (!is_empty) { generate_pseudo_core_charge_density(); } } if (ctx_.full_potential()) { using gc_z = Gaunt_coefficients<double_complex>; gaunt_coefs_ = std::unique_ptr<gc_z>(new gc_z(ctx_.lmax_apw(), ctx_.lmax_rho(), ctx_.lmax_apw(), SHT::gaunt_hybrid)); } l_by_lm_ = Utils::l_by_lm(ctx_.lmax_rho()); density_matrix_ = mdarray<double_complex, 4>(unit_cell_.max_mt_basis_size(), unit_cell_.max_mt_basis_size(), ctx_.num_mag_comp(), unit_cell_.num_atoms()); density_matrix_.zero(); /* split local G-vectors to low-frequency and high-frequency */ for (int igloc = 0; igloc < ctx_.gvec().count(); igloc++) { int ig = ctx_.gvec().offset() + igloc; auto gv = ctx_.gvec().gvec_cart(ig); if (gv.length() <= 2 * ctx_.gk_cutoff()) { lf_gvec_.push_back(igloc); if (ig) { lf_gvec_weights_.push_back(fourpi * unit_cell_.omega() / std::pow(gv.length(), 2)); } else { lf_gvec_weights_.push_back(0); } } else { hf_gvec_.push_back(igloc); } } } /// Set pointers to muffin-tin and interstitial charge density arrays void set_charge_density_ptr(double* rhomt, double* rhorg) { if (ctx_.full_potential() && rhomt) { rho_->set_mt_ptr(rhomt); } if (rhorg) { rho_->set_rg_ptr(rhorg); } } /// Set pointers to muffin-tin and interstitial magnetization arrays void set_magnetization_ptr(double* magmt, double* magir) { if (ctx_.num_mag_dims() == 0) { return; } assert(ctx_.num_spins() == 2); // set temporary array wrapper mdarray<double, 4> magmt_tmp(magmt, ctx_.lmmax_rho(), unit_cell_.max_num_mt_points(), unit_cell_.num_atoms(), ctx_.num_mag_dims()); mdarray<double, 2> 
magir_tmp(magir, ctx_.fft().size(), ctx_.num_mag_dims()); if (ctx_.num_mag_dims() == 1) { /* z component is the first and only one */ if (magmt) { magnetization_[0]->set_mt_ptr(&magmt_tmp(0, 0, 0, 0)); } if (magir) { magnetization_[0]->set_rg_ptr(&magir_tmp(0, 0)); } } if (ctx_.num_mag_dims() == 3) { if (magmt) { /* z component is the first */ magnetization_[0]->set_mt_ptr(&magmt_tmp(0, 0, 0, 2)); /* x component is the second */ magnetization_[1]->set_mt_ptr(&magmt_tmp(0, 0, 0, 0)); /* y component is the third */ magnetization_[2]->set_mt_ptr(&magmt_tmp(0, 0, 0, 1)); } if (magir) { /* z component is the first */ magnetization_[0]->set_rg_ptr(&magir_tmp(0, 2)); /* x component is the second */ magnetization_[1]->set_rg_ptr(&magir_tmp(0, 0)); /* y component is the third */ magnetization_[2]->set_rg_ptr(&magir_tmp(0, 1)); } } } /// Zero density and magnetization void zero() { rho_->zero(); for (int i = 0; i < ctx_.num_mag_dims(); i++) { magnetization_[i]->zero(); } } /// Find the total leakage of the core states out of the muffin-tins double core_leakage() { double sum = 0.0; for (int ic = 0; ic < unit_cell_.num_atom_symmetry_classes(); ic++) { sum += core_leakage(ic) * unit_cell_.atom_symmetry_class(ic).num_atoms(); } return sum; } /// Return core leakage for a specific atom symmetry class double core_leakage(int ic) { return unit_cell_.atom_symmetry_class(ic).core_leakage(); } /// Generate initial charge density and magnetization void initial_density(); void initial_density_pseudo(); void initial_density_full_pot(); /// Check total density for the correct number of electrons. 
    inline void check_num_electrons()
    {
        double nel{0};
        if (ctx_.full_potential()) {
            /* integrate the muffin-tin + interstitial parts of the density */
            std::vector<double> nel_mt;
            double nel_it;
            nel = rho_->integrate(nel_mt, nel_it);
        } else {
            /* f_0 (zero-G Fourier coefficient) times the unit-cell volume gives the total charge */
            nel = rho_->f_0().real() * unit_cell_.omega();
        }

        /* check the number of electrons; only rank 0 reports a deviation above the 1e-5 tolerance */
        if (std::abs(nel - unit_cell_.num_electrons()) > 1e-5 && ctx_.comm().rank() == 0) {
            std::stringstream s;
            s << "wrong number of electrons" << std::endl
              << " obtained value : " << nel << std::endl
              << " target value : " << unit_cell_.num_electrons() << std::endl
              << " difference : " << std::abs(nel - unit_cell_.num_electrons()) << std::endl;
            if (ctx_.full_potential()) {
                /* in FP-LAPW part of the deficit can be explained by the core leakage */
                s << " total core leakage : " << core_leakage();
                for (int ic = 0; ic < unit_cell_.num_atom_symmetry_classes(); ic++) {
                    s << std::endl << " atom class : " << ic << ", core leakage : " << core_leakage(ic);
                }
            }
            WARNING(s);
        }
    }

    /// Generate full charge density (valence + core) and magnetization from the wave functions.
    /** This function calls generate_valence() and then in case of full-potential LAPW method adds a core density
     *  to get the full charge density of the system. */
    inline void generate(K_point_set& ks__)
    {
        PROFILE("sirius::Density::generate");

        generate_valence(ks__);

        if (ctx_.full_potential()) {
            /* find the core states */
            generate_core_charge_density();
            /* add core contribution */
            for (int ialoc = 0; ialoc < (int)unit_cell_.spl_num_atoms().local_size(); ialoc++) {
                int ia = unit_cell_.spl_num_atoms(ialoc);
                for (int ir = 0; ir < unit_cell_.atom(ia).num_mt_points(); ir++) {
                    /* spherical core density goes into the lm=0 component; the division by y00
                       presumably converts to the R_{00} expansion convention -- confirm */
                    rho_->f_mt<index_domain_t::local>(0, ir, ialoc) +=
                        unit_cell_.atom(ia).symmetry_class().ae_core_charge_density(ir) / y00;
                }
            }
            /* synchronize muffin-tin part */
            rho_->sync_mt();
            for (int j = 0; j < ctx_.num_mag_dims(); j++) {
                magnetization_[j]->sync_mt();
            }
        }
    }

    /// Generate valence charge density and magnetization from the wave functions.
    /** The interstitial density is generated on the coarse FFT grid and then transformed to the PW domain.
* After symmetrization and mixing and before the generation of the XC potential density is transformted to the * real-space domain and checked for the number of electrons. */ inline void generate_valence(K_point_set& ks__); /// Add augmentation charge Q(r). /** Restore valence density by adding the Q-operator constribution. * The following term is added to the valence density, generated by the pseudo wave-functions: * \f[ * \tilde \rho({\bf G}) = \sum_{\alpha} \sum_{\xi \xi'} d_{\xi \xi'}^{\alpha} Q_{\xi' \xi}^{\alpha}({\bf G}) * \f] * Plane-wave coefficients of the Q-operator for a given atom \f$ \alpha \f$ can be obtained from the * corresponding coefficients of the Q-operator for a given atom \a type A: * \f[ * Q_{\xi' \xi}^{\alpha(A)}({\bf G}) = e^{-i{\bf G}\tau_{\alpha(A)}} Q_{\xi' \xi}^{A}({\bf G}) * \f] * We use this property to split the sum over atoms into sum over atom types and inner sum over atoms of the * same type: * \f[ * \tilde \rho({\bf G}) = \sum_{A} \sum_{\xi \xi'} Q_{\xi' \xi}^{A}({\bf G}) \sum_{\alpha(A)} * d_{\xi \xi'}^{\alpha(A)} e^{-i{\bf G}\tau_{\alpha(A)}} = * \sum_{A} \sum_{\xi \xi'} Q_{\xi' \xi}^{A}({\bf G}) d_{\xi \xi'}^{A}({\bf G}) * \f] * where * \f[ * d_{\xi \xi'}^{A}({\bf G}) = \sum_{\alpha(A)} d_{\xi \xi'}^{\alpha(A)} e^{-i{\bf G}\tau_{\alpha(A)}} * \f] */ void augment(K_point_set& ks__) { PROFILE("sirius::Density::augment"); /*check if we need to augment charge density and magnetization */ bool need_to_augment{false}; for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) { need_to_augment |= unit_cell_.atom_type(iat).augment(); } if (!need_to_augment) { return; } //if (ctx_.control().print_checksum_) { // for (auto e: rho_vec_) { // auto cs = e->checksum_pw(); // DUMP("checksum(rho_vec_pw): %20.14f %20.14f", cs.real(), cs.imag()); // } //} mdarray<double_complex, 2> rho_aug(ctx_.gvec().count(), ctx_.num_mag_dims() + 1, ctx_.dual_memory_t()); switch (ctx_.processing_unit()) { case CPU: { generate_rho_aug<CPU>(rho_aug); break; 
} case GPU: { generate_rho_aug<GPU>(rho_aug); break; } } for (int iv = 0; iv < ctx_.num_mag_dims() + 1; iv++) { #pragma omp parallel for schedule(static) for (int igloc = 0; igloc < ctx_.gvec().count(); igloc++) { rho_vec_[iv]->f_pw_local(igloc) += rho_aug(igloc, iv); } } } template <device_t pu> inline void generate_rho_aug(mdarray<double_complex, 2>& rho_aug__); /// Check density at MT boundary void check_density_continuity_at_mt(); void save() { rho_->hdf5_write(storage_file_name, "density"); for (int j = 0; j < ctx_.num_mag_dims(); j++) { std::stringstream s; s << "magnetization/" << j; magnetization_[j]->hdf5_write(storage_file_name, s.str()); } ctx_.comm().barrier(); } void load() { HDF5_tree fin(storage_file_name, hdf5_access_t::read_only); int ngv; fin.read("/parameters/num_gvec", &ngv, 1); if (ngv != ctx_.gvec().num_gvec()) { TERMINATE("wrong number of G-vectors"); } mdarray<int, 2> gv(3, ngv); fin.read("/parameters/gvec", gv); rho_->hdf5_read(fin["density"], gv); rho_->fft_transform(1); for (int j = 0; j < ctx_.num_mag_dims(); j++) { magnetization_[j]->hdf5_read(fin["magnetization"][j], gv); magnetization_[j]->fft_transform(1); } } void save_to_xsf() { //== FILE* fout = fopen("unit_cell.xsf", "w"); //== fprintf(fout, "CRYSTAL\n"); //== fprintf(fout, "PRIMVEC\n"); //== auto& lv = unit_cell_.lattice_vectors(); //== for (int i = 0; i < 3; i++) //== { //== fprintf(fout, "%18.12f %18.12f %18.12f\n", lv(0, i), lv(1, i), lv(2, i)); //== } //== fprintf(fout, "CONVVEC\n"); //== for (int i = 0; i < 3; i++) //== { //== fprintf(fout, "%18.12f %18.12f %18.12f\n", lv(0, i), lv(1, i), lv(2, i)); //== } //== fprintf(fout, "PRIMCOORD\n"); //== fprintf(fout, "%i 1\n", unit_cell_.num_atoms()); //== for (int ia = 0; ia < unit_cell_.num_atoms(); ia++) //== { //== auto pos = unit_cell_.get_cartesian_coordinates(unit_cell_.atom(ia).position()); //== fprintf(fout, "%i %18.12f %18.12f %18.12f\n", unit_cell_.atom(ia).zn(), pos[0], pos[1], pos[2]); //== } //== fclose(fout); } void 
save_to_ted() { //== void write_periodic_function() //== { //== //== mdarray<double, 3> vloc_3d_map(&vloc_it[0], fft_->size(0), fft_->size(1), fft_->size(2)); //== //== int nx = fft_->size(0); //== //== int ny = fft_->size(1); //== //== int nz = fft_->size(2); //== //== auto p = parameters_.unit_cell()->unit_cell_parameters(); //== //== FILE* fout = fopen("potential.ted", "w"); //== //== fprintf(fout, "%s\n", parameters_.unit_cell()->chemical_formula().c_str()); //== //== fprintf(fout, "%16.10f %16.10f %16.10f %16.10f %16.10f %16.10f\n", p.a, p.b, p.c, p.alpha, p.beta, p.gamma); //== //== fprintf(fout, "%i %i %i\n", nx + 1, ny + 1, nz + 1); //== //== for (int i0 = 0; i0 <= nx; i0++) //== //== { //== //== for (int i1 = 0; i1 <= ny; i1++) //== //== { //== //== for (int i2 = 0; i2 <= nz; i2++) //== //== { //== //== fprintf(fout, "%14.8f\n", vloc_3d_map(i0 % nx, i1 % ny, i2 % nz)); //== //== } //== //== } //== //== } //== //== fclose(fout); //== } } void save_to_xdmf() { //== mdarray<double, 3> rho_grid(&rho_->f_it<global>(0), fft_->size(0), fft_->size(1), fft_->size(2)); //== mdarray<double, 4> pos_grid(3, fft_->size(0), fft_->size(1), fft_->size(2)); //== mdarray<double, 4> mag_grid(3, fft_->size(0), fft_->size(1), fft_->size(2)); //== mag_grid.zero(); //== // loop over 3D array (real space) //== for (int j0 = 0; j0 < fft_->size(0); j0++) //== { //== for (int j1 = 0; j1 < fft_->size(1); j1++) //== { //== for (int j2 = 0; j2 < fft_->size(2); j2++) //== { //== int ir = static_cast<int>(j0 + j1 * fft_->size(0) + j2 * fft_->size(0) * fft_->size(1)); //== // get real space fractional coordinate //== double frv[] = {double(j0) / fft_->size(0), //== double(j1) / fft_->size(1), //== double(j2) / fft_->size(2)}; //== vector3d<double> rv = ctx_.unit_cell()->get_cartesian_coordinates(vector3d<double>(frv)); //== for (int x = 0; x < 3; x++) pos_grid(x, j0, j1, j2) = rv[x]; //== if (ctx_.num_mag_dims() == 1) mag_grid(2, j0, j1, j2) = magnetization_[0]->f_it<global>(ir); //== if 
(ctx_.num_mag_dims() == 3) //== { //== mag_grid(0, j0, j1, j2) = magnetization_[1]->f_it<global>(ir); //== mag_grid(1, j0, j1, j2) = magnetization_[2]->f_it<global>(ir); //== } //== } //== } //== } //== HDF5_tree h5_rho("rho.hdf5", true); //== h5_rho.write("rho", rho_grid); //== h5_rho.write("pos", pos_grid); //== h5_rho.write("mag", mag_grid); //== FILE* fout = fopen("rho.xdmf", "w"); //== //== fprintf(fout, "<?xml version=\"1.0\" ?>\n" //== //== "<!DOCTYPE Xdmf SYSTEM \"Xdmf.dtd\">\n" //== //== "<Xdmf>\n" //== //== " <Domain Name=\"name1\">\n" //== //== " <Grid Name=\"fft_fine_grid\" Collection=\"Unknown\">\n" //== //== " <Topology TopologyType=\"3DSMesh\" NumberOfElements=\" %i %i %i \"/>\n" //== //== " <Geometry GeometryType=\"XYZ\">\n" //== //== " <DataItem Dimensions=\"%i %i %i 3\" NumberType=\"Float\" Precision=\"8\" Format=\"HDF\">rho.hdf5:/pos</DataItem>\n" //== //== " </Geometry>\n" //== //== " <Attribute\n" //== //== " AttributeType=\"Scalar\"\n" //== //== " Center=\"Node\"\n" //== //== " Name=\"rho\">\n" //== //== " <DataItem\n" //== //== " NumberType=\"Float\"\n" //== //== " Precision=\"8\"\n" //== //== " Dimensions=\"%i %i %i\"\n" //== //== " Format=\"HDF\">\n" //== //== " rho.hdf5:/rho\n" //== //== " </DataItem>\n" //== //== " </Attribute>\n" //== //== " </Grid>\n" //== //== " </Domain>\n" //== //== "</Xdmf>\n", fft_->size(0), fft_->size(1), fft_->size(2), fft_->size(0), fft_->size(1), fft_->size(2), fft_->size(0), fft_->size(1), fft_->size(2)); //== fprintf(fout, "<?xml version=\"1.0\" ?>\n" //== "<!DOCTYPE Xdmf SYSTEM \"Xdmf.dtd\">\n" //== "<Xdmf>\n" //== " <Domain Name=\"name1\">\n" //== " <Grid Name=\"fft_fine_grid\" Collection=\"Unknown\">\n" //== " <Topology TopologyType=\"3DSMesh\" NumberOfElements=\" %i %i %i \"/>\n" //== " <Geometry GeometryType=\"XYZ\">\n" //== " <DataItem Dimensions=\"%i %i %i 3\" NumberType=\"Float\" Precision=\"8\" Format=\"HDF\">rho.hdf5:/pos</DataItem>\n" //== " </Geometry>\n" //== " <Attribute\n" //== " 
AttributeType=\"Vector\"\n" //== " Center=\"Node\"\n" //== " Name=\"mag\">\n" //== " <DataItem\n" //== " NumberType=\"Float\"\n" //== " Precision=\"8\"\n" //== " Dimensions=\"%i %i %i 3\"\n" //== " Format=\"HDF\">\n" //== " rho.hdf5:/mag\n" //== " </DataItem>\n" //== " </Attribute>\n" //== " </Grid>\n" //== " </Domain>\n" //== "</Xdmf>\n", fft_->size(0), fft_->size(1), fft_->size(2), fft_->size(0), fft_->size(1), fft_->size(2), fft_->size(0), fft_->size(1), fft_->size(2)); //== fclose(fout); } Periodic_function<double>& rho() { return *rho_; } Periodic_function<double> const& rho() const { return *rho_; } Smooth_periodic_function<double>& rho_pseudo_core() { return *rho_pseudo_core_; } Smooth_periodic_function<double> const& rho_pseudo_core() const { return *rho_pseudo_core_; } std::array<Periodic_function<double>*, 3> magnetization() { return {magnetization_[0].get(), magnetization_[1].get(), magnetization_[2].get()}; } Periodic_function<double>& magnetization(int i) { return *(magnetization_[i]); } Periodic_function<double> const& magnetization(int i) const { return *(magnetization_[i]); } Spheric_function<spectral, double> const& density_mt(int ialoc) const { return rho_->f_mt(ialoc); } /// Generate \f$ n_1 \f$ and \f$ \tilde{n}_1 \f$ in lm components. 
void generate_paw_loc_density(); std::vector<Spheric_function<spectral, double>> const& ae_paw_atom_density(int spl_paw_ind) const { return paw_density_data_[spl_paw_ind].ae_density_; } std::vector<Spheric_function<spectral, double>> const& ps_paw_atom_density(int spl_paw_ind) const { return paw_density_data_[spl_paw_ind].ps_density_; } // mdarray<double, 3> const& ae_paw_atom_magn(int spl_paw_ind) const // { // return paw_density_data_[spl_paw_ind].ae_magnetization_; // } // // mdarray<double, 3> const& ps_paw_atom_magn(int spl_paw_ind) const // { // return paw_density_data_[spl_paw_ind].ps_magnetization_; // } void allocate() { rho_->allocate_mt(true); for (int j = 0; j < ctx_.num_mag_dims(); j++) { magnetization_[j]->allocate_mt(true); } } void mixer_input() { if (ctx_.full_potential()) { STOP(); } else { int ld = static_cast<int>(hf_gvec_.size()); /* input high-frequency components */ for (int j = 0; j < ctx_.num_mag_dims() + 1; j++) { for (int i = 0; i < static_cast<int>(hf_gvec_.size()); i++) { int igloc = hf_gvec_[i]; hf_mixer_->input_local(i + j * ld, rho_vec_[j]->f_pw_local(igloc)); } } ld = static_cast<int>(lf_gvec_.size()); /* input low-frequency components */ for (int j = 0; j < ctx_.num_mag_dims() + 1; j++) { if (j == 0) { for (int i = 0; i < static_cast<int>(lf_gvec_.size()); i++) { int igloc = lf_gvec_[i]; lf_mixer_->input_local(i + j * ld, rho_vec_[j]->f_pw_local(igloc), lf_gvec_weights_[i]); } } else { for (int i = 0; i < static_cast<int>(lf_gvec_.size()); i++) { int igloc = lf_gvec_[i]; lf_mixer_->input_local(i + j * ld, rho_vec_[j]->f_pw_local(igloc)); } } } /* input commonly shared data */ for (int i = 0; i < static_cast<int>(density_matrix_.size()); i++) { lf_mixer_->input_shared(i, density_matrix_[i], 0); } } } void mixer_output() { if (ctx_.full_potential()) { STOP(); } else { int ld = static_cast<int>(hf_gvec_.size()); /* get high-frequency components */ for (int j = 0; j < ctx_.num_mag_dims() + 1; j++) { for (int i = 0; i < 
static_cast<int>(hf_gvec_.size()); i++) { int igloc = hf_gvec_[i]; rho_vec_[j]->f_pw_local(igloc) = hf_mixer_->output_local(i + j * ld); } } ld = static_cast<int>(lf_gvec_.size()); /* get low-frequency components */ for (int j = 0; j < ctx_.num_mag_dims() + 1; j++) { for (int i = 0; i < static_cast<int>(lf_gvec_.size()); i++) { int igloc = lf_gvec_[i]; rho_vec_[j]->f_pw_local(igloc) = lf_mixer_->output_local(i + j * ld); } } for (int i = 0; i < static_cast<int>(density_matrix_.size()); i++) { density_matrix_[i] = lf_mixer_->output_shared(i); } } } void mixer_init() { if (!ctx_.full_potential()) { hf_mixer_ = Mixer_factory<double_complex>("linear", 0, static_cast<int>(hf_gvec_.size() * (1 + ctx_.num_mag_dims())), ctx_.mixer_input(), ctx_.comm()); lf_mixer_ = Mixer_factory<double_complex>(ctx_.mixer_input().type_, static_cast<int>(density_matrix_.size()), static_cast<int>(lf_gvec_.size() * (1 + ctx_.num_mag_dims())), ctx_.mixer_input(), ctx_.comm()); } else { //mixer_ = Mixer_factory<double>(ctx_.mixer_input().type_, size(), ctx_.mixer_input(), ctx_.comm()); } mixer_input(); if (ctx_.full_potential()) { mixer_->initialize(); } else { lf_mixer_->initialize(); if (hf_mixer_) { hf_mixer_->initialize(); } } } double mix() { double rms; if (ctx_.full_potential()) { STOP(); /* mix in real-space in case of FP-LAPW */ mixer_input(); rms = mixer_->mix(ctx_.settings().mixer_rss_min_); mixer_output(); /* get rho(G) after mixing */ rho_->fft_transform(-1); } else { /* mix in G-space in case of PP */ mixer_input(); rms = lf_mixer_->mix(ctx_.settings().mixer_rss_min_); if (hf_mixer_) { rms += hf_mixer_->mix(ctx_.settings().mixer_rss_min_); } mixer_output(); } return rms; } inline double dr2() { return lf_mixer_->rss(); } mdarray<double_complex, 4> const& density_matrix() const { return density_matrix_; } mdarray<double_complex, 4>& density_matrix() { return density_matrix_; } inline void fft_transform(int direction__) { rho_->fft_transform(direction__); for (int j = 0; j < 
ctx_.num_mag_dims(); j++) { magnetization_[j]->fft_transform(direction__); } } /// Return density matrix in auxiliary form. inline mdarray<double, 3> density_matrix_aux(int iat__) { auto& atom_type = unit_cell_.atom_type(iat__); int nbf = atom_type.mt_basis_size(); /* convert to real matrix */ mdarray<double, 3> dm(nbf * (nbf + 1) / 2, atom_type.num_atoms(), ctx_.num_mag_dims() + 1); #pragma omp parallel for for (int i = 0; i < atom_type.num_atoms(); i++) { int ia = atom_type.atom_id(i); for (int xi2 = 0; xi2 < nbf; xi2++) { for (int xi1 = 0; xi1 <= xi2; xi1++) { int idx12 = xi2 * (xi2 + 1) / 2 + xi1; switch (ctx_.num_mag_dims()) { case 3: { dm(idx12, i, 2) = 2 * std::real(density_matrix_(xi2, xi1, 2, ia)); dm(idx12, i, 3) = -2 * std::imag(density_matrix_(xi2, xi1, 2, ia)); } case 1: { dm(idx12, i, 0) = std::real(density_matrix_(xi2, xi1, 0, ia) + density_matrix_(xi2, xi1, 1, ia)); dm(idx12, i, 1) = std::real(density_matrix_(xi2, xi1, 0, ia) - density_matrix_(xi2, xi1, 1, ia)); break; } case 0: { dm(idx12, i, 0) = density_matrix_(xi2, xi1, 0, ia).real(); break; } } } } } return std::move(dm); } /// Calculate magnetic moment of the atoms /// Compute approximate atomic magnetic moments in case of PW-PP. mdarray<double, 2> compute_atomic_mag_mom() const { PROFILE("sirius::DFT_ground_state::compute_atomic_mag_mom"); mdarray<double, 2> mmom(3, unit_cell_.num_atoms()); mmom.zero(); #pragma omp parallel for for (int ia = 0; ia < unit_cell_.num_atoms(); ia++) { auto& atom_to_grid_map = ctx_.atoms_to_grid_idx_map()[ia]; for (auto coord : atom_to_grid_map) { int ir = coord.first; for (int j = 0; j < ctx_.num_mag_dims(); j++) { mmom(j, ia) += magnetization(j).f_rg(ir); } } for (int j: {0, 1, 2}) { mmom(j, ia) *= (unit_cell_.omega() / ctx_.fft().size()); } } ctx_.fft().comm().allreduce(&mmom(0, 0), static_cast<int>(mmom.size())); return std::move(mmom); } /// Symmetrize density matrix. 
/** Initially, density matrix is obtained with summation over irreducible BZ: * \f[ * \tilde n_{\ell \lambda m \sigma, \ell' \lambda' m' \sigma'}^{\alpha} = * \sum_{j} \sum_{{\bf k}}^{IBZ} \langle Y_{\ell m} u_{\ell \lambda}^{\alpha}| \Psi_{j{\bf k}}^{\sigma} \rangle w_{\bf k} n_{j{\bf k}} * \langle \Psi_{j{\bf k}}^{\sigma'} | u_{\ell' \lambda'}^{\alpha} Y_{\ell' m'} \rangle * \f] * In order to symmetrize it, the following operation is performed: * \f[ * n_{\ell \lambda m \sigma, \ell' \lambda' m' \sigma'}^{\alpha} = \sum_{{\bf P}} * \sum_{j} \sum_{\bf k}^{IBZ} \langle Y_{\ell m} u_{\ell \lambda}^{\alpha}| \Psi_{j{\bf P}{\bf k}}^{\sigma} \rangle w_{\bf k} n_{j{\bf k}} * \langle \Psi_{j{\bf P}{\bf k}}^{\sigma'} | u_{\ell' \lambda'}^{\alpha} Y_{\ell' m'} \rangle * \f] * where \f$ {\bf P} \f$ is the space-group symmetry operation. The inner product between wave-function and * local orbital is transformed as: * \f[ * \langle \Psi_{j{\bf P}{\bf k}}^{\sigma} | u_{\ell \lambda}^{\alpha} Y_{\ell m} \rangle = * \int \Psi_{j{\bf P}{\bf k}}^{\sigma *}({\bf r}) u_{\ell \lambda}^{\alpha}(r) Y_{\ell m}(\hat {\bf r}) dr = * \int \Psi_{j{\bf k}}^{\sigma *}({\bf P}^{-1}{\bf r}) u_{\ell \lambda}^{\alpha}(r) Y_{\ell m}(\hat {\bf r}) dr = * \int \Psi_{j{\bf k}}^{\sigma *}({\bf r}) u_{\ell \lambda}^{{\bf P}\alpha}(r) Y_{\ell m}({\bf P} \hat{\bf r}) dr * \f] * Under rotation the spherical harmonic is transformed as: * \f[ * Y_{\ell m}({\bf P} \hat{\bf r}) = {\bf P}^{-1}Y_{\ell m}(\hat {\bf r}) = \sum_{m'} D_{m'm}^{\ell}({\bf P}^{-1}) Y_{\ell m'}(\hat {\bf r}) = * \sum_{m'} D_{mm'}^{\ell}({\bf P}) Y_{\ell m'}(\hat {\bf r}) * \f] * The inner-product integral is then rewritten as: * \f[ * \langle \Psi_{j{\bf P}{\bf k}}^{\sigma} | u_{\ell \lambda}^{\alpha} Y_{\ell m} \rangle = * \sum_{m'} D_{mm'}^{\ell}({\bf P}) \langle \Psi_{j{\bf k}}^{\sigma} | u_{\ell \lambda}^{{\bf P}\alpha} Y_{\ell m} \rangle * \f] * and the final expression for density matrix gets the following form: * \f[ * n_{\ell 
\lambda m \sigma, \ell' \lambda' m' \sigma'}^{\alpha} = \sum_{{\bf P}} * \sum_{j} \sum_{\bf k}^{IBZ} \sum_{m_1 m_2} D_{mm_1}^{\ell *}({\bf P}) D_{m'm_2}^{\ell'}({\bf P}) * \langle Y_{\ell m_1} u_{\ell \lambda}^{{\bf P} \alpha}| * \Psi_{j{\bf k}}^{\sigma} \rangle w_{\bf k} n_{j{\bf k}} \langle \Psi_{j{\bf k}}^{\sigma'} | * u_{\ell' \lambda'}^{{\bf P}\alpha} Y_{\ell' m_2} \rangle = \sum_{{\bf P}} * \sum_{m_1 m_2} D_{mm_1}^{\ell *}({\bf P}) D_{m'm_2}^{\ell'}({\bf P}) * \tilde n_{\ell \lambda m_1 \sigma, \ell' \lambda' m_2 \sigma'}^{{\bf P}\alpha} * \f] */ void symmetrize_density_matrix(); }; #include "Density/initial_density.hpp" #include "Density/add_k_point_contribution_rg.hpp" #include "Density/add_k_point_contribution_dm.hpp" #include "Density/generate_valence.hpp" #include "Density/generate_rho_aug.hpp" #include "Density/symmetrize_density_matrix.hpp" #include "Density/generate_valence_mt.hpp" #include "Density/check_density_continuity_at_mt.hpp" #include "Density/paw_density.hpp" } #endif // __DENSITY_H__
Loop.h
// This file is part of the Peano project. For conditions of distribution and // use, please see the copyright notice at www.peano-framework.org /** * This file defines some macros for d-dimensional loops. * * @version $Revision: 1.10 $ * @author Tobias Weinzierl */ #ifndef _PEANO_UTILS_LOOP_H_ #define _PEANO_UTILS_LOOP_H_ #include "peano/utils/Globals.h" #include "tarch/la/Vector.h" #include "tarch/multicore/Loop.h" #include <bitset> namespace peano { namespace utils { /** * Is used by the z-loop. See macro dforz. */ typedef std::bitset<DIMENSIONS> LoopDirection; /** * This operation performs a d-dimensional increment on a given integer vector: * The first component of the vector is incremented. If the first component is * greater than max-1, the component is set zero and the next component is * incremented by one. This operation is used often by d-dimensional for-loops. */ void dInc(tarch::la::Vector<DIMENSIONS,int>& counter, int max); /** * This operation performs a d-dimensional decrement on a given integer vector: * The first component of the vector is decremented. If the first component is * smaller than 0, the component is set to max and the next component is * decremented by one. */ void dDec(tarch::la::Vector<DIMENSIONS,int>& counter, int max); /** * This operation performs a d-dimensional increment on a given integer vector: * The first component of the vector is incremented. If the first component is * greater than max(0)-1, the component is set zero and the next component is * incremented by one. This operation is used often by d-dimensional for-loops. */ void dInc(tarch::la::Vector<DIMENSIONS,int>& counter, const tarch::la::Vector<DIMENSIONS,int>& max); /** * Perform a d-dimensional increment by value increment: The first component * of the counter is incremented by increment. Afterwards, the operation * checks the first entry: If it exceeds max, its module value is set, the * next component is incremented by increment, and the check continues. 
*/ void dIncByVector(tarch::la::Vector<DIMENSIONS,int>& counter, int max, int increment); /** * Perform a scalar increment of a vector: The operation equals a sequence of * increment calls to dInc(). */ void dIncByScalar(tarch::la::Vector<DIMENSIONS,int>& counter, int max, int increment); /** * Same operation as dInc(tarch::la::Vector<DIMENSIONS,int>,int), but now one dimension is not taken * into consideration. */ void dInc(tarch::la::Vector<DIMENSIONS,int>& counter, int max, int doNotExamine); /** * Operation similar to dInc, but is given a direction bitset that identifies * whether the counters has to be incremented or decremented. See the dforz * macro for an example how to use dInc. */ void dInc(tarch::la::Vector<DIMENSIONS,int>& counter, int max, LoopDirection& direction ); /** * Element-wise comparison for the for loops. * @return true if all entries of counter are smaller max */ int dCmp(const tarch::la::Vector<DIMENSIONS,int>& counter, int max); /** * Element-wise comparison for the loops. * @return true if all entries of counter are smaller than their corresponding * entries in max */ int dCmp(const tarch::la::Vector<DIMENSIONS,int>& counter, const tarch::la::Vector<DIMENSIONS,int>& max); /** * compares two vectors with regards to their linearised value. * * @returns true, if dLinearised(counter, XXX) < dLinearised(max, XXX) */ bool dCmpLinearOrder(const tarch::la::Vector<DIMENSIONS,int>& counter, const tarch::la::Vector<DIMENSIONS,int>& max); /** * This operation is called pretty often and, thus, might cause a significant * slowdown in the overall performance. Therefore, I introduced a aggressive * optimization based on lookup tables. This optimization is switched on if * DLOOP_AGGRESSIVE is specified (default in peano project). 
Two preconditions
   * have to be fulfilled in this case: All parameters have to stay within
   * certain boundaries (all positive, max smaller or equal to 5)
   * and one has to call both setupLookupTableForDLinearised() and
   * setupLookupTableForDDelinearised() before using dLinearised() or
   * dDelinearised().
   *
   * Obviously, creating a lookup table for these two operations is not that
   * simple, since the parameter space has to be mapped onto a unique key. To
   * end up with a simple mapping, all the constraints from above are added.
   * Although the mapping might be slow, it is still faster than computing the
   * partial sums of a to the power of b.
   *
   * @return the linearisation of the counter, i.e. the k-th component is
   *         multiplied by max^k and the results are accumulated.
   */
  int dLinearised( const tarch::la::Vector<DIMENSIONS,int>& counter, int max );

  /**
   * Special 2d variant of dLinearised that works also if you compile with other
   * dimensions.
   */
  int d2Linearised( const tarch::la::Vector<2,int>& counter, int max );

  /**
   * Special 3d variant of dLinearised that works also if you compile with other
   * dimensions.
   */
  int d3Linearised( const tarch::la::Vector<3,int>& counter, int max );

  /**
   * Linearisation not Optimised
   *
   * This operation's semantics equals dLinearised, but the operation is not
   * optimised at all. It thus allows to have arbitrary argument values. Yet,
   * this version is not optimised, i.e. it might become a bottleneck.
   */
  int dLinearisedWithoutLookup( const tarch::la::Vector<DIMENSIONS,int>& counter, int max );

  /**
   * Counterpart of dLinearised(): reconstructs the d-dimensional counter
   * from its linearised value.
   *
   * Like dLinearised(), this operation may be backed by a lookup table if
   * DLOOP_AGGRESSIVE is specified, so the same preconditions apply: the
   * arguments have to stay within the lookup-table bounds and
   * setupLookupTableForDDelinearised() has to be called beforehand. Use
   * dDelinearisedWithoutLookup() if you need arbitrary argument values.
   */
  tarch::la::Vector<DIMENSIONS,int> dDelinearised(int value, int max );

  /**
   * Delinearization not optimised.
*/ tarch::la::Vector<DIMENSIONS,int> dDelinearisedWithoutLookup(int value, int max); void setupLookupTableForDLinearised(); void setupLookupTableForDDelinearised(); /** * @return a vector containing zero values only. */ tarch::la::Vector<DIMENSIONS,int> dStartVector(); /** * @return a vector containing only zero values besides the dim-th entry. This * entry is set value. */ tarch::la::Vector<DIMENSIONS,int> dStartVector(int dim, int value); /** * Creates a start vector. Each component is set either 0 or max-1 depending * on direction: If direction is true, then the value 0 is zero. * * @return a start vector for an osciallating loop. */ tarch::la::Vector<DIMENSIONS,int> dStartVector( int max, const LoopDirection& direction ); } } /** * Very often one needs a d-dimensional for loop. A d-dimensional for loop is * something like * \code * for (x(0)=0; x(0)<N; x(0)++) * for (x(1)=0; x(1)<N; x(1)++) * for (x(2)=0; x(2)<N; x(2)++) * \endcode * with d nested for loops. Thus, one has to code such loops for every d * manually. This macro offers a d-independend alternative, just write * \code * dfor (x,N) { * ... * } * \endcode * The precompiler extracts this macro and within the loop body, you are able * to use the integer tinyvector x. * * Here is an example: * \code * dfor(a,2) { * std::cout << a << ","; * } * \endcode * results in [0,0], [1,0], [0,1], [1,1] if DIMENSIONS equals 2. If DIMENSION * equals 3 the same construct gives you [0,0,0], [1,0,0], [0,1,0], [1,1,0], * [0,0,1], [1,0,1], [0,1,1], [1,1,1]. */ #define dfor(counter,max) \ for (tarch::la::Vector<DIMENSIONS,int> counter = peano::utils::dStartVector(); peano::utils::dCmp(counter,max); peano::utils::dInc(counter,max) ) /** * Shortcut For dfor(counter,4) * * The usage of this optimised shortcut differs from dfor: You have to * replace both the dfor and the opening bracket by this macro, i.e. 
* * \code * dfor(counter,4) { * \endcode * * becomes * * \code * dfor4(counter) * \endcode * * You usually use this macro with * \code * #pragma unroll(FOUR_POWER_D) * \endcode * or * \code * #pragma omp parallel for schedule(static) * \endcode * * If you work with this specialised version of dfor on a variable k, two * counter variables are available within the loop's scope. The variable k * itself with type tarch::la::Vector<DIMENSIONS,int>. Furthermore, there's always a variable * kScalar giving you k's value linearised. */ #define dfor4(counter) \ for( int counter##Scalar=0; counter##Scalar<FOUR_POWER_D; counter##Scalar++) { \ tarch::la::Vector<DIMENSIONS,int> counter; \ { \ int copy##counter##Scalar = counter##Scalar; \ for (int counter##ddd=DIMENSIONS-1; counter##ddd>=0; counter##ddd--) { \ int counter##aPowI = 1; \ for (int counter##jjj=0; counter##jjj<counter##ddd; counter##jjj++) { \ counter##aPowI *= 4; \ } \ counter(counter##ddd) = copy##counter##Scalar / counter##aPowI; \ copy##counter##Scalar -= counter(counter##ddd) * counter##aPowI; \ }} /** * Shortcut For dfor(counter,3) * * The usage of this optimised shortcut differs from dfor: You have to * replace both the dfor and the opening bracket by this macro, i.e. * * \code * dfor(counter,3) { * \endcode * * becomes * * \code * dfor3(counter) * \endcode * * You usually use this macro with * \code * #pragma unroll(THREE_POWER_D) * \endcode * or * \code * #pragma omp parallel for schedule(static) * \endcode * * If you work with this specialised version of dfor on a variable k, two * counter variables are available within the loop's scope. The variable k * itself with type tarch::la::Vector<DIMENSIONS,int>. Furthermore, there's always a variable * kScalar giving you k's value linearised. 
*/ #define dfor3(counter) \ for( int counter##Scalar=0; counter##Scalar<THREE_POWER_D; counter##Scalar++) { \ tarch::la::Vector<DIMENSIONS,int> counter; \ { \ int copy##counter##Scalar = counter##Scalar; \ for (int counter##ddd=DIMENSIONS-1; counter##ddd>=0; counter##ddd--) { \ int counter##aPowI = 1; \ for (int counter##jjj=0; counter##jjj<counter##ddd; counter##jjj++) { \ counter##aPowI *= 3; \ } \ counter(counter##ddd) = copy##counter##Scalar / counter##aPowI; \ copy##counter##Scalar -= counter(counter##ddd) * counter##aPowI; \ }} #define dfor5(counter) \ for( int counter##Scalar=0; counter##Scalar<FIVE_POWER_D; counter##Scalar++) { \ tarch::la::Vector<DIMENSIONS,int> counter; \ { \ int copy##counter##Scalar = counter##Scalar; \ for (int counter##ddd=DIMENSIONS-1; counter##ddd>=0; counter##ddd--) { \ int counter##aPowI = 1; \ for (int counter##jjj=0; counter##jjj<counter##ddd; counter##jjj++) { \ counter##aPowI *= 5; \ } \ counter(counter##ddd) = copy##counter##Scalar / counter##aPowI; \ copy##counter##Scalar -= counter(counter##ddd) * counter##aPowI; \ }} #define dfor7(counter) \ for( int counter##Scalar=0; counter##Scalar<SEVEN_POWER_D; counter##Scalar++) { \ tarch::la::Vector<DIMENSIONS,int> counter; \ { \ int copy##counter##Scalar = counter##Scalar; \ for (int counter##ddd=DIMENSIONS-1; counter##ddd>=0; counter##ddd--) { \ int counter##aPowI = 1; \ for (int counter##jjj=0; counter##jjj<counter##ddd; counter##jjj++) { \ counter##aPowI *= 7; \ } \ counter(counter##ddd) = copy##counter##Scalar / counter##aPowI; \ copy##counter##Scalar -= counter(counter##ddd) * counter##aPowI; \ }} #define dfor9(counter) \ for( int counter##Scalar=0; counter##Scalar<NINE_POWER_D; counter##Scalar++) { \ tarch::la::Vector<DIMENSIONS,int> counter; \ { \ int copy##counter##Scalar = counter##Scalar; \ for (int counter##ddd=DIMENSIONS-1; counter##ddd>=0; counter##ddd--) { \ int counter##aPowI = 1; \ for (int counter##jjj=0; counter##jjj<counter##ddd; counter##jjj++) { \ 
counter##aPowI *= 9; \ } \ counter(counter##ddd) = copy##counter##Scalar / counter##aPowI; \ copy##counter##Scalar -= counter(counter##ddd) * counter##aPowI; \ }} /** * If DIMENSIONS is not set to two, we might nevertheless need * two-dimensional loops. So this is the corresponding macro. It is * way slower than dfor if you compile with Dim2. * * Please use this macro with an enddforx macro closing your scope rather than * brackets. * * Please note that counterScalar is already a linearised version of your counter. * * Please note that you need a specialised linearisation function (depending on d * explicitly) to work with 2d index vectors within such a loop. Do not just use * dLinearised, but use the d2Linearised or d3Linearised variant instead. */ #define d2for(counter,max) \ for( int counter##Scalar=0; counter##Scalar<tarch::la::aPowI(2,max); counter##Scalar++) { \ tarch::la::Vector<2,int> counter; \ { \ int copy##counter##Scalar = counter##Scalar; \ for (int counter##ddd=2-1; counter##ddd>=0; counter##ddd--) { \ int counter##aPowI = 1; \ for (int counter##jjj=0; counter##jjj<counter##ddd; counter##jjj++) { \ counter##aPowI *= max; \ } \ counter(counter##ddd) = copy##counter##Scalar / counter##aPowI; \ copy##counter##Scalar -= counter(counter##ddd) * counter##aPowI; \ }} /** * If DIMENSIONS is not set to two, we might nevertheless need * two-dimensional loops. So this is the corresponding macro. * * Please use enddforx to close a loop started with this macro. 
 */
#define d2for2(counter) \
  for( int counter##Scalar=0; counter##Scalar<tarch::la::aPowI(2,2); counter##Scalar++) { \
    tarch::la::Vector<2,int> counter; \
    { \
      int copy##counter##Scalar = counter##Scalar; \
      for (int counter##ddd=2-1; counter##ddd>=0; counter##ddd--) { \
        int counter##aPowI = 1; \
        for (int counter##jjj=0; counter##jjj<counter##ddd; counter##jjj++) { \
          counter##aPowI *= 2; \
        } \
        counter(counter##ddd) = copy##counter##Scalar / counter##aPowI; \
        copy##counter##Scalar -= counter(counter##ddd) * counter##aPowI; \
  }}

/**
 * If DIMENSIONS is not set to three, we might nevertheless need
 * three-dimensional loops. So this is the corresponding macro.
 *
 * Please use enddforx to close a loop started with this macro.
 */
#define d3for2(counter) \
  for( int counter##Scalar=0; counter##Scalar<tarch::la::aPowI(3,2); counter##Scalar++) { \
    tarch::la::Vector<3,int> counter; \
    { \
      int copy##counter##Scalar = counter##Scalar; \
      for (int counter##ddd=3-1; counter##ddd>=0; counter##ddd--) { \
        int counter##aPowI = 1; \
        for (int counter##jjj=0; counter##jjj<counter##ddd; counter##jjj++) { \
          counter##aPowI *= 2; \
        } \
        counter(counter##ddd) = copy##counter##Scalar / counter##aPowI; \
        copy##counter##Scalar -= counter(counter##ddd) * counter##aPowI; \
  }}

/**
 * If DIMENSIONS is not set to three, we might nevertheless need
 * three-dimensional loops. So this is the corresponding macro. It is
 * way slower than dfor if you compile with Dim3.
 *
 * Please use this macro with an enddforx macro closing your scope rather than
 * brackets.
 *
 * Please note that counterScalar is already a linearised version of your counter.
 *
 * Please note that you need a specialised linearisation function (depending on d
 * explicitly) to work with 3d index vectors within such a loop. Do not just use
 * dLinearised, but use the d2Linearised or d3Linearised variant instead.
*/ #define d3for(counter,max) \ for( int counter##Scalar=0; counter##Scalar<tarch::la::aPowI(3,max); counter##Scalar++) { \ tarch::la::Vector<3,int> counter; \ { \ int copy##counter##Scalar = counter##Scalar; \ for (int counter##ddd=3-1; counter##ddd>=0; counter##ddd--) { \ int counter##aPowI = 1; \ for (int counter##jjj=0; counter##jjj<counter##ddd; counter##jjj++) { \ counter##aPowI *= max; \ } \ counter(counter##ddd) = copy##counter##Scalar / counter##aPowI; \ copy##counter##Scalar -= counter(counter##ddd) * counter##aPowI; \ }} /** * If DIMENSIONS is not set to two, we might nevertheless need * two-dimensional loops. So this is the corresponding macro. * * Please use enddforx to close a loop started with this macro. */ #define d2for3(counter) \ for( int counter##Scalar=0; counter##Scalar<tarch::la::aPowI(2,3); counter##Scalar++) { \ tarch::la::Vector<2,int> counter; \ { \ int copy##counter##Scalar = counter##Scalar; \ for (int counter##ddd=2-1; counter##ddd>=0; counter##ddd--) { \ int counter##aPowI = 1; \ for (int counter##jjj=0; counter##jjj<counter##ddd; counter##jjj++) { \ counter##aPowI *= 3; \ } \ counter(counter##ddd) = copy##counter##Scalar / counter##aPowI; \ copy##counter##Scalar -= counter(counter##ddd) * counter##aPowI; \ }} /** * If DIMENSIONS is not set to three, we might nevertheless need * three-dimensional loops. So this is the corresponding macro. * * Please use enddforx to close a loop started with this macro. 
*/ #define d3for3(counter) \ for( int counter##Scalar=0; counter##Scalar<tarch::la::aPowI(3,3); counter##Scalar++) { \ tarch::la::Vector<3,int> counter; \ { \ int copy##counter##Scalar = counter##Scalar; \ for (int counter##ddd=3-1; counter##ddd>=0; counter##ddd--) { \ int counter##aPowI = 1; \ for (int counter##jjj=0; counter##jjj<counter##ddd; counter##jjj++) { \ counter##aPowI *= 3; \ } \ counter(counter##ddd) = copy##counter##Scalar / counter##aPowI; \ copy##counter##Scalar -= counter(counter##ddd) * counter##aPowI; \ }} /** * Shortcut For dfor(counter,2) * * The usage of this optimised shortcut differs from dfor: You have to * replace both the dfor and the opening bracket by this macro, i.e. * * \code * dfor(counter,2) { * \endcode * * becomes * * \code * dfor2(counter) * \endcode * * You usually use this macro with * \code * #pragma unroll(TWO_POWER_D) * \endcode * or * \code * #pragma omp parallel for schedule(static) * \endcode * * If you work with this specialised version of dfor on a variable k, two * counter variables are available within the loop's scope. The variable k * itself with type tarch::la::Vector<DIMENSIONS,int>. Furthermore, there's always a variable * kScalar giving you k's value linearised. 
*/ /* * bit flipping used for DIMENSIONS = 2, and DIMENSIONS = 3 * for more information about the idea principle used refer to https://opt-patterns.wiki.tum.de/dfor */ #if DIMENSIONS == 2 #define dfor2(counter) \ for( int counter##Scalar=0, AA##counter = 0, BB##counter = 0; counter##Scalar<TWO_POWER_D; counter##Scalar++) { \ tarch::la::Vector<DIMENSIONS,int> counter; \ counter(0) = AA##counter; \ counter(1) = BB##counter; \ AA##counter = !AA##counter; \ BB##counter = !(AA##counter ^ BB##counter); #elif DIMENSIONS == 3 #define dfor2(counter) \ for( int counter##Scalar=0, AA##counter = 0, BB##counter = 0, CC##counter = 0; counter##Scalar<TWO_POWER_D; counter##Scalar++) { \ tarch::la::Vector<DIMENSIONS,int> counter; \ counter(0) = AA##counter; \ counter(1) = BB##counter; \ counter(2) = CC##counter; \ AA##counter = !AA##counter; \ BB##counter = !(AA##counter ^ BB##counter); \ CC##counter = CC##counter || (!AA##counter && !BB##counter && !CC##counter); #else #define dfor2(counter) \ for( int counter##Scalar=0; counter##Scalar<TWO_POWER_D; counter##Scalar++) { \ tarch::la::Vector<DIMENSIONS,int> counter; \ { \ int copy##counter##Scalar = counter##Scalar; \ for (int counter##ddd=DIMENSIONS-1; counter##ddd>=0; counter##ddd--) { \ int counter##aPowI = 1; \ for (int counter##jjj=0; counter##jjj<counter##ddd; counter##jjj++) { \ counter##aPowI *= 2; \ } \ counter(counter##ddd) = copy##counter##Scalar / counter##aPowI; \ copy##counter##Scalar -= counter(counter##ddd) * counter##aPowI; \ }} #endif /** * I prefer to use this macro for dforx instead of a closing bracket as many * syntax parser fail otherwise. */ #define enddforx } /** * This is an exclusive d-dimensional for loop. Exclusive means, there is one * dimension that is not manipulated during the for loop. This dimension * (entry of the counter) is specified by dim and has the value value * throughout the for-loop. 
*/ #define dfore(counter,max,dim,value) \ for (tarch::la::Vector<DIMENSIONS,int> counter = peano::utils::dStartVector(dim,value); peano::utils::dCmp(counter,max); peano::utils::dInc(counter,max,dim) ) /** * This is a d-dimensional z-loop. A z-loop is a d-dimensional loop the * counter direction changes everytime an inner loop direction has changed. * So this is the loop corresponding to a Peano curve. The for loop is passed * a counter name, the number of steps to perform in each direction and a * direction flag that identifies the initial direction. Note that this * argument has to be a real variable, it might not be a constant. The * direction flag array identifies for each direction, whether the initial * loop goes along the axis or not. The type of direction is LoopDirection. * * Here are some examples for two dimensions: * \code * LoopDirection d(3); // equals {true,true} and identifies the standard * // Peano Leitmotiv * zfor( a, 3, d ) { * std::cout << a; * } * \endcode * yields in [0,0],[1,0],[2,0],[2,1],[1,1],[0,1],[0,2],[1,2],[2,2]. * * \code * LoopDirection d(1); // equals {true, false} and specifies a Peano curve * // from the left top to right bottom * zfor( a, 3, d ) { * std::cout << a; * } * \endcode * yields in [0,2],[1,2],[2,2],[2,1],[1,1],[0,1],[0,0],[1,0],[2,0]. 
*/ #define zfor(counter,max,direction) \ {for (tarch::la::Vector<DIMENSIONS,int> counter = peano::utils::dStartVector(max,direction); peano::utils::dCmp(counter,max); peano::utils::dInc(counter,max,direction) ) { /* * zfor3 is an optimized version of zfor for max = 3 * A lookup table is used for dim=2 and dim=3, for higher dimensions * the standard zfor is used instead */ #if DIMENSIONS == 2 static const int lookupzfor[4][9][2] = { {{2,2},{1,2},{0,2},{0,1},{1,1},{2,1},{2,0},{1,0},{0,0}}, {{0,2},{1,2},{2,2},{2,1},{1,1},{0,1},{0,0},{1,0},{2,0}}, {{2,0},{1,0},{0,0},{0,1},{1,1},{2,1},{2,2},{1,2},{0,2}}, {{0,0},{1,0},{2,0},{2,1},{1,1},{0,1},{0,2},{1,2},{2,2}} }; #define zfor3(counter, direction) \ { tarch::la::Vector<DIMENSIONS,int> counter; \ int counter##initDir = static_cast<int>(direction.to_ulong()); \ for (int counter##i = 0; counter##i < 9; ++counter##i) { \ counter(0) = lookupzfor[counter##initDir][counter##i][0]; \ counter(1) = lookupzfor[counter##initDir][counter##i][1]; #elif DIMENSIONS == 3 static const int lookupzfor[8][27][3] = { {{2,2,2},{1,2,2},{0,2,2},{0,1,2},{1,1,2},{2,1,2},{2,0,2},{1,0,2},{0,0,2},{0,0,1},{1,0,1},{2,0,1},{2,1,1},{1,1,1},{0,1,1},{0,2,1},{1,2,1},{2,2,1},{2,2,0},{1,2,0},{0,2,0},{0,1,0},{1,1,0},{2,1,0},{2,0,0},{1,0,0},{0,0,0}}, {{0,2,2},{1,2,2},{2,2,2},{2,1,2},{1,1,2},{0,1,2},{0,0,2},{1,0,2},{2,0,2},{2,0,1},{1,0,1},{0,0,1},{0,1,1},{1,1,1},{2,1,1},{2,2,1},{1,2,1},{0,2,1},{0,2,0},{1,2,0},{2,2,0},{2,1,0},{1,1,0},{0,1,0},{0,0,0},{1,0,0},{2,0,0}}, {{2,0,2},{1,0,2},{0,0,2},{0,1,2},{1,1,2},{2,1,2},{2,2,2},{1,2,2},{0,2,2},{0,2,1},{1,2,1},{2,2,1},{2,1,1},{1,1,1},{0,1,1},{0,0,1},{1,0,1},{2,0,1},{2,0,0},{1,0,0},{0,0,0},{0,1,0},{1,1,0},{2,1,0},{2,2,0},{1,2,0},{0,2,0}}, {{0,0,2},{1,0,2},{2,0,2},{2,1,2},{1,1,2},{0,1,2},{0,2,2},{1,2,2},{2,2,2},{2,2,1},{1,2,1},{0,2,1},{0,1,1},{1,1,1},{2,1,1},{2,0,1},{1,0,1},{0,0,1},{0,0,0},{1,0,0},{2,0,0},{2,1,0},{1,1,0},{0,1,0},{0,2,0},{1,2,0},{2,2,0}}, 
{{2,2,0},{1,2,0},{0,2,0},{0,1,0},{1,1,0},{2,1,0},{2,0,0},{1,0,0},{0,0,0},{0,0,1},{1,0,1},{2,0,1},{2,1,1},{1,1,1},{0,1,1},{0,2,1},{1,2,1},{2,2,1},{2,2,2},{1,2,2},{0,2,2},{0,1,2},{1,1,2},{2,1,2},{2,0,2},{1,0,2},{0,0,2}}, {{0,2,0},{1,2,0},{2,2,0},{2,1,0},{1,1,0},{0,1,0},{0,0,0},{1,0,0},{2,0,0},{2,0,1},{1,0,1},{0,0,1},{0,1,1},{1,1,1},{2,1,1},{2,2,1},{1,2,1},{0,2,1},{0,2,2},{1,2,2},{2,2,2},{2,1,2},{1,1,2},{0,1,2},{0,0,2},{1,0,2},{2,0,2}}, {{2,0,0},{1,0,0},{0,0,0},{0,1,0},{1,1,0},{2,1,0},{2,2,0},{1,2,0},{0,2,0},{0,2,1},{1,2,1},{2,2,1},{2,1,1},{1,1,1},{0,1,1},{0,0,1},{1,0,1},{2,0,1},{2,0,2},{1,0,2},{0,0,2},{0,1,2},{1,1,2},{2,1,2},{2,2,2},{1,2,2},{0,2,2}}, {{0,0,0},{1,0,0},{2,0,0},{2,1,0},{1,1,0},{0,1,0},{0,2,0},{1,2,0},{2,2,0},{2,2,1},{1,2,1},{0,2,1},{0,1,1},{1,1,1},{2,1,1},{2,0,1},{1,0,1},{0,0,1},{0,0,2},{1,0,2},{2,0,2},{2,1,2},{1,1,2},{0,1,2},{0,2,2},{1,2,2},{2,2,2}} }; #define zfor3(counter, direction) \ { tarch::la::Vector<DIMENSIONS,int> counter; \ int counter##initDir = static_cast<int>(direction.to_ulong()); \ for (int counter##i = 0; counter##i < 27; ++counter##i) { \ counter(0) = lookupzfor[counter##initDir][counter##i][0]; \ counter(1) = lookupzfor[counter##initDir][counter##i][1]; \ counter(2) = lookupzfor[counter##initDir][counter##i][2]; #else #define zfor3(counter, direction) \ zfor(counter, 3, direction) #endif #define endzfor }} #endif
buggy_version.c
#include<stdio.h>

/**
 * Demonstration program: increase `sum` by 10 using an OpenMP
 * parallel-for reduction.
 *
 * Fixes relative to the buggy original:
 *  - The loop body added the loop index (`sum += i`, i.e. +45 for
 *    i = 0..9) although the stated intent is to increase sum by 10;
 *    each of the 10 iterations now adds exactly 1.
 *  - The iteration variable of an OpenMP worksharing loop is
 *    predetermined *private*; naming it in a shared() clause is
 *    non-conforming.  The index is now declared in the for statement
 *    itself and the shared() clause is dropped.
 *  - main() returns an explicit status code.
 *
 * Final value: sum == 1 + 10 == 11.
 */
int main(){
    int sum = 1;

    /* increase sum by 10 using openmp: reduction(+:sum) gives each
       thread a private copy and combines the partial sums at the end. */
    #pragma omp parallel for reduction (+: sum)
    for (int i = 0; i < 10; i++) {
        sum += 1;
    }

    return 0;
}
GB_binop__bclr_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bclr_uint16 // A.*B function (eWiseMult): GB_AemultB__bclr_uint16 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__bclr_uint16 // C+=b function (dense accum): GB_Cdense_accumb__bclr_uint16 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bclr_uint16 // C=scalar+B GB_bind1st__bclr_uint16 // C=scalar+B' GB_bind1st_tran__bclr_uint16 // C=A+scalar GB_bind2nd__bclr_uint16 // C=A'+scalar GB_bind2nd_tran__bclr_uint16 // C type: uint16_t // A type: uint16_t // B,b type: uint16_t // BinaryOp: cij = GB_BITCLR (aij, bij, uint16_t, 16) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] 
#define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = GB_BITCLR (x, y, uint16_t, 16) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BCLR || GxB_NO_UINT16 || GxB_NO_BCLR_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bclr_uint16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bclr_uint16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bclr_uint16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, 
bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__bclr_uint16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bclr_uint16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct 
*GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bclr_uint16 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint16_t bij = Bx [p] ; Cx [p] = GB_BITCLR (x, bij, uint16_t, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bclr_uint16 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint16_t aij = Ax [p] ; Cx [p] = GB_BITCLR (aij, y, uint16_t, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) 
#undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = GB_BITCLR (x, aij, uint16_t, 16) ; \ } GrB_Info GB_bind1st_tran__bclr_uint16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = GB_BITCLR (aij, y, uint16_t, 16) ; \ } GrB_Info GB_bind2nd_tran__bclr_uint16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_unaryop__abs_int64_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__abs_int64_uint8
// op(A') function: GB_tran__abs_int64_uint8

// C type:   int64_t
// A type:   uint8_t
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = GB_IABS (aij)

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting
#define GB_CASTING(z, x) \
    int64_t z = (int64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT64 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx [p] = (int64_t) GB_IABS (Ax [p]) for all p, element-wise in parallel.
GrB_Info GB_unop__abs_int64_uint8
(
    int64_t *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,                // number of entries to compute
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop itself is generated by the included
// GB_unaryop_transpose.c (phase 2 only).
GrB_Info GB_tran__abs_int64_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,            // rowcount workspace
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice                     // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
sample-5.c
#include <stdio.h>
#include <stdlib.h>   /* FIX: <malloc.h> is non-standard; malloc/free are in <stdlib.h> */
#include <omp.h>

/* Matrix dimensions: A is m x n, B is n x p, C = A * B is m x p. */
#define m 9
#define n 6
#define p 6

/*==================================*/
/* Print a row-major (row x col) integer matrix, one row per line. */
void DisplayMatrix(int *A, int row, int col){
    int i, j;
    for (i = 0; i < row; i++){
        for (j = 0; j < col; j++)
            printf("%d\t", *(A + i*col + j));
        printf("\n");
    }
}

/*==================================*/
int main(int argc, char *argv[])
{
    int i, j;

    /* Allocate A (m x n), B (n x p) and C (m x p). */
    int *A = (int *) malloc ((m*n)*sizeof(int));
    int *B = (int *) malloc ((n*p)*sizeof(int));
    int *C = (int *) malloc ((m*p)*sizeof(int));
    /* FIX: the original never checked the allocations. */
    if (A == NULL || B == NULL || C == NULL) {
        fprintf(stderr, "Out of memory\n");
        free(A); free(B); free(C);
        return 1;
    }

    /* A is the "identity" matrix: 1 on the diagonal, 0 elsewhere. */
    for (i = 0; i < m; i++)
        for (j = 0; j < n; j++)
            *(A + i*n + j) = (i == j) ? 1 : 0;
    /* printf("The matrix A:\n"); */
    /* DisplayMatrix(A, m, n);   */

    /* B[i][j] = i*p + j. */
    for (i = 0; i < n; i++)
        for (j = 0; j < p; j++)
            *(B + i*p + j) = 1*(i*p + j);

    printf("The matrix B\n");
    DisplayMatrix(B, n, p);

    /*==================================*/
    int Nr_of_Threads = 3;

    /* Each thread multiplies a contiguous band of rows of A by B.
     *
     * FIX: the original assumed the runtime would create exactly
     * Nr_of_Threads threads but never requested that team size.  With a
     * larger default team, threads with id >= 3 wrote past the end of C
     * (heap overflow); with a smaller team, some rows of C were never
     * computed and uninitialized memory was printed.  It also dropped the
     * remainder rows whenever m was not divisible by the team size.  The
     * band bounds are now derived from the actual team size, and the last
     * thread picks up any remainder rows. */
    #pragma omp parallel num_threads(Nr_of_Threads)
    {
        int id = omp_get_thread_num();
        int team = omp_get_num_threads();   /* actual team size */
        int band = m / team;                /* rows per thread  */
        int start_i = band * id;
        int stop_i = (id == team - 1) ? m : band * (id + 1);
        int ii, jj, kk;

        /* C[ii][kk] = sum_j A[ii][j] * B[j][kk] over this thread's rows */
        for (ii = start_i; ii < stop_i; ii++)
            for (kk = 0; kk < p; kk++){
                int acc = 0;
                for (jj = 0; jj < n; jj++)
                    acc += (*(A + ii*n + jj)) * (*(B + jj*p + kk));
                *(C + ii*p + kk) = acc;
            }
    }

    /*==================================*/
    printf( "Ma tran C:\n");
    DisplayMatrix(C, m, p);

    /* FIX: the original leaked A, B and C. */
    free(A);
    free(B);
    free(C);
    /*==================================*/
    return 0;
}

/* Three ways to decompose the domain (translated from the original notes):
 *  - 1: each thread computes a contiguous band of columns.
 *  - 2: each thread computes a contiguous band of rows.
 *  - 3: interleaved (non-contiguous) columns / rows per thread. */
_phonopy.c
/* Copyright (C) 2011 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/

#include <Python.h>
#include <stdio.h>
#include <math.h>
#include <float.h>
#include <numpy/arrayobject.h>
#include <dynmat.h>
#include <derivative_dynmat.h>
#include <kgrid.h>
#include <tetrahedron_method.h>

/* Boltzmann constant, used by the thermal-property kernels (units per the
   value: eV/K). */
#define KB 8.6173382568083159E-05

/* PHPYCONST is defined in dynmat.h */

/* Build dynamical matrix */
static PyObject * py_transform_dynmat_to_fc(PyObject *self, PyObject *args);
static PyObject * py_perm_trans_symmetrize_fc(PyObject *self, PyObject *args);
static PyObject * py_perm_trans_symmetrize_compact_fc(PyObject *self, PyObject *args);
static PyObject * py_transpose_compact_fc(PyObject *self, PyObject *args);
static PyObject * py_get_dynamical_matrix(PyObject *self, PyObject *args);
static PyObject * py_get_nac_dynamical_matrix(PyObject *self, PyObject *args);
static PyObject * py_get_dipole_dipole(PyObject *self, PyObject *args);
static PyObject * py_get_dipole_dipole_q0(PyObject *self, PyObject *args);
static PyObject * py_get_derivative_dynmat(PyObject *self, PyObject *args);
static PyObject * py_get_thermal_properties(PyObject *self, PyObject *args);
static PyObject * py_distribute_fc2_with_mappings(PyObject *self, PyObject *args);
static PyObject * py_compute_permutation(PyObject *self, PyObject *args);
static PyObject * py_gsv_copy_smallest_vectors(PyObject *self, PyObject *args);
static void distribute_fc2_with_mappings(double (*fc2)[3][3],
                                         const int * atom_list,
                                         const int len_atom_list,
                                         PHPYCONST double (*r_carts)[3][3],
                                         const int * permutations,
                                         const int * map_atoms,
                                         const int * map_syms,
                                         const int num_rot,
                                         const int num_pos);
static int compute_permutation(int * rot_atom,
                               PHPYCONST double lat[3][3],
                               PHPYCONST double (*pos)[3],
                               PHPYCONST double (*rot_pos)[3],
                               const int num_pos,
                               const double symprec);
static void gsv_copy_smallest_vectors(double (*shortest_vectors)[27][3],
                                      int * multiplicity,
                                      PHPYCONST double (*vector_lists)[27][3],
                                      PHPYCONST double (*length_lists)[27],
                                      const int num_lists,
                                      const double symprec);
static PyObject * py_thm_neighboring_grid_points(PyObject *self, PyObject *args);
static PyObject * py_thm_relative_grid_address(PyObject *self, PyObject *args);
static PyObject * py_thm_all_relative_grid_address(PyObject *self, PyObject *args);
static PyObject * py_thm_integration_weight(PyObject *self, PyObject *args);
static PyObject * py_thm_integration_weight_at_omegas(PyObject *self, PyObject *args);
static PyObject * py_get_tetrahedra_frequenies(PyObject *self, PyObject *args);
static PyObject * py_tetrahedron_method_dos(PyObject *self, PyObject *args);
static double get_free_energy_omega(const double temperature, const double omega);
static double get_entropy_omega(const double temperature, const double omega);
static double get_heat_capacity_omega(const double temperature, const double omega);
static void set_index_permutation_symmetry_fc(double * fc, const int natom);
static void set_translational_symmetry_fc(double * fc, const int natom);
static void set_index_permutation_symmetry_compact_fc(double * fc,
                                                      const int p2s[],
                                                      const int s2pp[],
                                                      const int nsym_list[],
                                                      const int perms[],
                                                      const int n_satom,
                                                      const int n_patom,
                                                      const int is_transpose);
static void set_translational_symmetry_compact_fc(double * fc,
                                                  const int p2s[],
                                                  const int n_satom,
                                                  const int n_patom);
/* static double get_energy_omega(double temperature, double omega); */
static int nint(const double a);

/* Per-module state: only the module's exception object. */
struct module_state {
  PyObject *error;
};

#if PY_MAJOR_VERSION >= 3
#define GETSTATE(m) ((struct module_state*)PyModule_GetState(m))
#else
#define GETSTATE(m) (&_state)
static struct module_state _state;
#endif

/* Test helper exposed as _phonopy.error_out: always raises _phonopy.Error. */
static PyObject *
error_out(PyObject *m) {
  struct module_state *st = GETSTATE(m);
  PyErr_SetString(st->error, "something bad happened");
  return NULL;
}

/* Method table mapping the Python-visible names to the wrappers below. */
static PyMethodDef _phonopy_methods[] = {
  {"error_out", (PyCFunction)error_out, METH_NOARGS, NULL},
  {"transform_dynmat_to_fc", py_transform_dynmat_to_fc, METH_VARARGS,
   "Transform a set of dynmat to force constants"},
  {"perm_trans_symmetrize_fc", py_perm_trans_symmetrize_fc, METH_VARARGS,
   "Enforce permutation and translational symmetry of force constants"},
  {"perm_trans_symmetrize_compact_fc", py_perm_trans_symmetrize_compact_fc,
   METH_VARARGS,
   "Enforce permutation and translational symmetry of compact force constants"},
  {"transpose_compact_fc", py_transpose_compact_fc, METH_VARARGS,
   "Transpose compact force constants"},
  {"dynamical_matrix", py_get_dynamical_matrix, METH_VARARGS,
   "Dynamical matrix"},
  {"nac_dynamical_matrix", py_get_nac_dynamical_matrix, METH_VARARGS,
   "NAC dynamical matrix"},
  {"dipole_dipole", py_get_dipole_dipole, METH_VARARGS,
   "Dipole-dipole interaction"},
  {"dipole_dipole_q0", py_get_dipole_dipole_q0, METH_VARARGS,
   "q=0 terms of Dipole-dipole interaction"},
  {"derivative_dynmat", py_get_derivative_dynmat, METH_VARARGS,
   "Q derivative of dynamical matrix"},
  {"thermal_properties", py_get_thermal_properties, METH_VARARGS,
   "Thermal properties"},
  {"distribute_fc2_with_mappings", py_distribute_fc2_with_mappings,
   METH_VARARGS,
   "Distribute force constants for all atoms in atom_list using precomputed symmetry mappings."},
  {"compute_permutation", py_compute_permutation, METH_VARARGS,
   "Compute indices of original points in a set of rotated points."},
  {"gsv_copy_smallest_vectors", py_gsv_copy_smallest_vectors, METH_VARARGS,
   "Implementation detail of get_smallest_vectors."},
  {"neighboring_grid_points", py_thm_neighboring_grid_points, METH_VARARGS,
   "Neighboring grid points by relative grid addresses"},
  {"tetrahedra_relative_grid_address", py_thm_relative_grid_address,
   METH_VARARGS,
   "Relative grid addresses of vertices of 24 tetrahedra"},
  {"all_tetrahedra_relative_grid_address", py_thm_all_relative_grid_address,
   METH_VARARGS,
   "4 (all) sets of relative grid addresses of vertices of 24 tetrahedra"},
  {"tetrahedra_integration_weight", py_thm_integration_weight, METH_VARARGS,
   "Integration weight for tetrahedron method"},
  {"tetrahedra_integration_weight_at_omegas",
   py_thm_integration_weight_at_omegas, METH_VARARGS,
   "Integration weight for tetrahedron method at omegas"},
  {"get_tetrahedra_frequencies", py_get_tetrahedra_frequenies, METH_VARARGS,
   "Run tetrahedron method"},
  {"tetrahedron_method_dos", py_tetrahedron_method_dos, METH_VARARGS,
   "Run tetrahedron method"},
  {NULL, NULL, 0, NULL}
};

#if PY_MAJOR_VERSION >= 3

static int _phonopy_traverse(PyObject *m, visitproc visit, void *arg) {
  Py_VISIT(GETSTATE(m)->error);
  return 0;
}

static int _phonopy_clear(PyObject *m) {
  Py_CLEAR(GETSTATE(m)->error);
  return 0;
}

static struct PyModuleDef moduledef = {
  PyModuleDef_HEAD_INIT,
  "_phonopy",
  NULL,
  sizeof(struct module_state),
  _phonopy_methods,
  NULL,
  _phonopy_traverse,
  _phonopy_clear,
  NULL
};

#define INITERROR return NULL

/* Module entry point (Python 3). */
PyObject *
PyInit__phonopy(void)

#else
#define INITERROR return

/* Module entry point (Python 2). */
void
init_phonopy(void)
#endif
{
#if PY_MAJOR_VERSION >= 3
  PyObject *module = PyModule_Create(&moduledef);
#else
  PyObject *module = Py_InitModule("_phonopy", _phonopy_methods);
#endif
  struct module_state *st;

  if (module == NULL)
    INITERROR;
  st = GETSTATE(module);

  st->error = PyErr_NewException("_phonopy.Error", NULL, NULL);
  if (st->error == NULL) {
    Py_DECREF(module);
    INITERROR;
  }

#if PY_MAJOR_VERSION >= 3
  return module;
#endif
}

/* Python wrapper: rebuild force constants from dynamical matrices sampled
   on the commensurate points (delegates to dym_transform_dynmat_to_fc). */
static PyObject * py_transform_dynmat_to_fc(PyObject *self, PyObject *args)
{
  PyArrayObject* py_force_constants;
  PyArrayObject* py_dynamical_matrices;
  PyArrayObject* py_commensurate_points;
  PyArrayObject* py_shortest_vectors;
  PyArrayObject* py_multiplicities;
  PyArrayObject* py_masses;
  PyArrayObject* py_s2pp_map;
  PyArrayObject* py_fc_index_map;

  double* fc;
  double* dm;
  double (*comm_points)[3];
  double (*shortest_vectors)[27][3];
  double* masses;
  int* multiplicities;
  int* s2pp_map;
  int* fc_index_map;
  int num_patom;
  int num_satom;

  if (!PyArg_ParseTuple(args, "OOOOOOOO",
                        &py_force_constants,
                        &py_dynamical_matrices,
                        &py_commensurate_points,
                        &py_shortest_vectors,
                        &py_multiplicities,
                        &py_masses,
                        &py_s2pp_map,
                        &py_fc_index_map)) {
    return NULL;
  }

  /* (continues below: raw-pointer extraction and the dym_ call) */
  fc =
(double*)PyArray_DATA(py_force_constants);
  dm = (double*)PyArray_DATA(py_dynamical_matrices);
  comm_points = (double(*)[3])PyArray_DATA(py_commensurate_points);
  shortest_vectors = (double(*)[27][3])PyArray_DATA(py_shortest_vectors);
  masses = (double*)PyArray_DATA(py_masses);
  multiplicities = (int*)PyArray_DATA(py_multiplicities);
  s2pp_map = (int*)PyArray_DATA(py_s2pp_map);
  fc_index_map = (int*)PyArray_DATA(py_fc_index_map);
  /* multiplicities is (num_satom, num_patom); its dims give both counts */
  num_patom = PyArray_DIMS(py_multiplicities)[1];
  num_satom = PyArray_DIMS(py_multiplicities)[0];

  dym_transform_dynmat_to_fc(fc,
                             dm,
                             comm_points,
                             shortest_vectors,
                             multiplicities,
                             masses,
                             s2pp_map,
                             fc_index_map,
                             num_patom,
                             num_satom);

  Py_RETURN_NONE;
}

/* Python wrapper: find, for each rotated position, the index of the
   matching original position (within symprec); delegates to
   compute_permutation and returns its found / not-found flag as an int. */
static PyObject * py_compute_permutation(PyObject *self, PyObject *args)
{
  PyArrayObject* permutation;
  PyArrayObject* lattice;
  PyArrayObject* positions;
  PyArrayObject* permuted_positions;
  double symprec;

  int* rot_atoms;
  double (*lat)[3];
  double (*pos)[3];
  double (*rot_pos)[3];
  int num_pos;
  int is_found;

  if (!PyArg_ParseTuple(args, "OOOOd",
                        &permutation,
                        &lattice,
                        &positions,
                        &permuted_positions,
                        &symprec)) {
    return NULL;
  }

  rot_atoms = (int*)PyArray_DATA(permutation);
  lat = (double(*)[3])PyArray_DATA(lattice);
  pos = (double(*)[3])PyArray_DATA(positions);
  rot_pos = (double(*)[3])PyArray_DATA(permuted_positions);
  num_pos = PyArray_DIMS(positions)[0];

  is_found = compute_permutation(rot_atoms,
                                 lat,
                                 pos,
                                 rot_pos,
                                 num_pos,
                                 symprec);

  return Py_BuildValue("i", is_found);
}

/* Python wrapper: copy the smallest candidate vectors and multiplicities
   into the output arrays (implementation detail of get_smallest_vectors). */
static PyObject * py_gsv_copy_smallest_vectors(PyObject *self, PyObject *args)
{
  PyArrayObject* py_shortest_vectors;
  PyArrayObject* py_multiplicity;
  PyArrayObject* py_vectors;
  PyArrayObject* py_lengths;
  double symprec;

  double (*shortest_vectors)[27][3];
  double (*vectors)[27][3];
  double (*lengths)[27];
  int * multiplicity;
  int size_super, size_prim;

  if (!PyArg_ParseTuple(args, "OOOOd",
                        &py_shortest_vectors,
                        &py_multiplicity,
                        &py_vectors,
                        &py_lengths,
                        &symprec)) {
    return NULL;
  }

  shortest_vectors = (double(*)[27][3])PyArray_DATA(py_shortest_vectors);
  multiplicity = (int*)PyArray_DATA(py_multiplicity);
  vectors = (double(*)[27][3])PyArray_DATA(py_vectors);
  lengths = (double(*)[27])PyArray_DATA(py_lengths);
  size_super = PyArray_DIMS(py_vectors)[0];
  size_prim = PyArray_DIMS(py_vectors)[1];

  /* the first two axes of py_vectors are flattened into one list length */
  gsv_copy_smallest_vectors(shortest_vectors,
                            multiplicity,
                            vectors,
                            lengths,
                            size_super * size_prim,
                            symprec);

  Py_RETURN_NONE;
}

/* Python wrapper: symmetrize full force constants in place (index
   permutation, then translational invariance). */
static PyObject * py_perm_trans_symmetrize_fc(PyObject *self, PyObject *args)
{
  PyArrayObject* force_constants;
  double *fc;
  int natom;

  if (!PyArg_ParseTuple(args, "O", &force_constants)) {
    return NULL;
  }

  fc = (double*)PyArray_DATA(force_constants);
  natom = PyArray_DIMS(force_constants)[0];

  set_index_permutation_symmetry_fc(fc, natom);
  set_translational_symmetry_fc(fc, natom);

  Py_RETURN_NONE;
}

/* Python wrapper: symmetrize compact force constants in place.  When
   level > 0, run `level` rounds of transpose symmetrization, each followed
   by removing the mean over the supercell index, before the final pass. */
static PyObject * py_perm_trans_symmetrize_compact_fc(PyObject *self,
                                                      PyObject *args)
{
  PyArrayObject* py_fc;
  PyArrayObject* py_permutations;
  PyArrayObject* py_s2pp_map;
  PyArrayObject* py_p2s_map;
  PyArrayObject* py_nsym_list;
  int level;

  double *fc;
  int *perms;
  int *s2pp;
  int *p2s;
  int *nsym_list;
  int n_patom, n_satom, i, j, k, l, n;
  double sum;

  if (!PyArg_ParseTuple(args, "OOOOOi",
                        &py_fc,
                        &py_permutations,
                        &py_s2pp_map,
                        &py_p2s_map,
                        &py_nsym_list,
                        &level)) {
    return NULL;
  }

  fc = (double*)PyArray_DATA(py_fc);
  perms = (int*)PyArray_DATA(py_permutations);
  s2pp = (int*)PyArray_DATA(py_s2pp_map);
  p2s = (int*)PyArray_DATA(py_p2s_map);
  nsym_list = (int*)PyArray_DATA(py_nsym_list);
  n_patom = PyArray_DIMS(py_fc)[0];
  n_satom = PyArray_DIMS(py_fc)[1];

  if (level > 0) {
    for (n = 0; n < level; n++) {
      /* transpose only */
      set_index_permutation_symmetry_compact_fc(fc,
                                                p2s,
                                                s2pp,
                                                nsym_list,
                                                perms,
                                                n_satom,
                                                n_patom,
                                                1);
      /* subtract the mean over j of fc[i][j][k][l] from each element,
         per (i, k, l) triple */
      for (i = 0; i < n_patom; i++) {
        for (k = 0; k < 3; k++) {
          for (l = 0; l < 3; l++) {
            sum = 0;
            for (j = 0; j < n_satom; j++) {
              sum += fc[i * n_satom * 9 + j * 9 + k * 3 + l];
            }
            sum /= n_satom;
            for (j = 0; j < n_satom; j++) {
              fc[i * n_satom * 9 + j * 9 + k * 3 + l] -= sum;
            }
          }
        }
      }
    }
  }
  set_index_permutation_symmetry_compact_fc(fc,
                                            p2s,
                                            s2pp,
                                            nsym_list,
                                            perms,
                                            n_satom,
                                            n_patom,
                                            0);
  set_translational_symmetry_compact_fc(fc, p2s, n_satom, n_patom);

  Py_RETURN_NONE;
}

/* Python wrapper: transpose compact force constants in place
   (is_transpose flag = 1). */
static PyObject * py_transpose_compact_fc(PyObject *self, PyObject *args)
{
  PyArrayObject* py_fc;
  PyArrayObject* py_permutations;
  PyArrayObject* py_s2pp_map;
  PyArrayObject* py_p2s_map;
  PyArrayObject* py_nsym_list;

  double *fc;
  int *s2pp;
  int *p2s;
  int *nsym_list;
  int *perms;
  int n_patom, n_satom;

  if (!PyArg_ParseTuple(args, "OOOOO",
                        &py_fc,
                        &py_permutations,
                        &py_s2pp_map,
                        &py_p2s_map,
                        &py_nsym_list)) {
    return NULL;
  }

  fc = (double*)PyArray_DATA(py_fc);
  perms = (int*)PyArray_DATA(py_permutations);
  s2pp = (int*)PyArray_DATA(py_s2pp_map);
  p2s = (int*)PyArray_DATA(py_p2s_map);
  nsym_list = (int*)PyArray_DATA(py_nsym_list);
  n_patom = PyArray_DIMS(py_fc)[0];
  n_satom = PyArray_DIMS(py_fc)[1];

  set_index_permutation_symmetry_compact_fc(fc,
                                            p2s,
                                            s2pp,
                                            nsym_list,
                                            perms,
                                            n_satom,
                                            n_patom,
                                            1);

  Py_RETURN_NONE;
}

/* Python wrapper: build the dynamical matrix at q (no NAC correction). */
static PyObject * py_get_dynamical_matrix(PyObject *self, PyObject *args)
{
  PyArrayObject* py_dynamical_matrix;
  PyArrayObject* py_force_constants;
  PyArrayObject* py_shortest_vectors;
  PyArrayObject* py_q;
  PyArrayObject* py_multiplicities;
  PyArrayObject* py_masses;
  PyArrayObject* py_s2p_map;
  PyArrayObject* py_p2s_map;

  double* dm;
  double* fc;
  double* q;
  double (*svecs)[27][3];
  double* m;
  int* multi;
  int* s2p_map;
  int* p2s_map;
  int num_patom;
  int num_satom;

  if (!PyArg_ParseTuple(args, "OOOOOOOO",
                        &py_dynamical_matrix,
                        &py_force_constants,
                        &py_q,
                        &py_shortest_vectors,
                        &py_multiplicities,
                        &py_masses,
                        &py_s2p_map,
                        &py_p2s_map)) {
    return NULL;
  }

  dm = (double*)PyArray_DATA(py_dynamical_matrix);
  fc = (double*)PyArray_DATA(py_force_constants);
  q = (double*)PyArray_DATA(py_q);
  svecs = (double(*)[27][3])PyArray_DATA(py_shortest_vectors);
  m = (double*)PyArray_DATA(py_masses);
  multi = (int*)PyArray_DATA(py_multiplicities);
  s2p_map = (int*)PyArray_DATA(py_s2p_map);
  p2s_map =
(int*)PyArray_DATA(py_p2s_map);
  num_patom = PyArray_DIMS(py_p2s_map)[0];
  num_satom = PyArray_DIMS(py_s2p_map)[0];

  /* NULL: no NAC charge sum for this variant */
  dym_get_dynamical_matrix_at_q(dm,
                                num_patom,
                                num_satom,
                                fc,
                                q,
                                svecs,
                                multi,
                                m,
                                s2p_map,
                                p2s_map,
                                NULL,
                                1);

  Py_RETURN_NONE;
}

/* Python wrapper: dynamical matrix at q with the non-analytical-term
   (Born effective charge) correction: builds the charge sum first, then
   delegates to dym_get_dynamical_matrix_at_q. */
static PyObject * py_get_nac_dynamical_matrix(PyObject *self, PyObject *args)
{
  PyArrayObject* py_dynamical_matrix;
  PyArrayObject* py_force_constants;
  PyArrayObject* py_shortest_vectors;
  PyArrayObject* py_q_cart;
  PyArrayObject* py_q;
  PyArrayObject* py_multiplicities;
  PyArrayObject* py_masses;
  PyArrayObject* py_s2p_map;
  PyArrayObject* py_p2s_map;
  PyArrayObject* py_born;
  double factor;

  double* dm;
  double* fc;
  double* q_cart;
  double* q;
  double (*svecs)[27][3];
  double* m;
  double (*born)[3][3];
  int* multi;
  int* s2p_map;
  int* p2s_map;
  int num_patom;
  int num_satom;

  int n;
  double (*charge_sum)[3][3];

  if (!PyArg_ParseTuple(args, "OOOOOOOOOOd",
                        &py_dynamical_matrix,
                        &py_force_constants,
                        &py_q,
                        &py_shortest_vectors,
                        &py_multiplicities,
                        &py_masses,
                        &py_s2p_map,
                        &py_p2s_map,
                        &py_q_cart,
                        &py_born,
                        &factor))
    return NULL;

  dm = (double*)PyArray_DATA(py_dynamical_matrix);
  fc = (double*)PyArray_DATA(py_force_constants);
  q_cart = (double*)PyArray_DATA(py_q_cart);
  q = (double*)PyArray_DATA(py_q);
  svecs = (double(*)[27][3])PyArray_DATA(py_shortest_vectors);
  m = (double*)PyArray_DATA(py_masses);
  born = (double(*)[3][3])PyArray_DATA(py_born);
  multi = (int*)PyArray_DATA(py_multiplicities);
  s2p_map = (int*)PyArray_DATA(py_s2p_map);
  p2s_map = (int*)PyArray_DATA(py_p2s_map);
  num_patom = PyArray_DIMS(py_p2s_map)[0];
  num_satom = PyArray_DIMS(py_s2p_map)[0];

  /* NOTE(review): malloc result is not checked before use */
  charge_sum = (double(*)[3][3])
    malloc(sizeof(double[3][3]) * num_patom * num_patom);
  /* number of supercell images per primitive atom */
  n = num_satom / num_patom;

  dym_get_charge_sum(charge_sum, num_patom, factor / n, q_cart, born);
  dym_get_dynamical_matrix_at_q(dm,
                                num_patom,
                                num_satom,
                                fc,
                                q,
                                svecs,
                                multi,
                                m,
                                s2p_map,
                                p2s_map,
                                charge_sum,
                                1);

  free(charge_sum);

  Py_RETURN_NONE;
}

/* Python wrapper: dipole-dipole interaction term over the G-vector list
   (delegates to dym_get_dipole_dipole). */
static PyObject * py_get_dipole_dipole(PyObject *self, PyObject *args)
{
  PyArrayObject* py_dd;
  PyArrayObject* py_dd_q0;
  PyArrayObject* py_G_list;
  PyArrayObject* py_q_cart;
  PyArrayObject* py_q_direction;
  PyArrayObject* py_born;
  PyArrayObject* py_dielectric;
  PyArrayObject* py_positions;
  double factor;
  double lambda;
  double tolerance;

  double* dd;
  double* dd_q0;
  double (*G_list)[3];
  double* q_vector;
  double* q_direction;
  double (*born)[3][3];
  double (*dielectric)[3];
  double (*pos)[3];
  int num_patom, num_G;

  if (!PyArg_ParseTuple(args, "OOOOOOOOddd",
                        &py_dd,
                        &py_dd_q0,
                        &py_G_list,
                        &py_q_cart,
                        &py_q_direction,
                        &py_born,
                        &py_dielectric,
                        &py_positions,
                        &factor,
                        &lambda,
                        &tolerance))
    return NULL;

  dd = (double*)PyArray_DATA(py_dd);
  dd_q0 = (double*)PyArray_DATA(py_dd_q0);
  G_list = (double(*)[3])PyArray_DATA(py_G_list);
  /* q_direction is optional: Python None means "not given" */
  if ((PyObject*)py_q_direction == Py_None) {
    q_direction = NULL;
  } else {
    q_direction = (double*)PyArray_DATA(py_q_direction);
  }
  q_vector = (double*)PyArray_DATA(py_q_cart);
  born = (double(*)[3][3])PyArray_DATA(py_born);
  dielectric = (double(*)[3])PyArray_DATA(py_dielectric);
  pos = (double(*)[3])PyArray_DATA(py_positions);
  num_G = PyArray_DIMS(py_G_list)[0];
  num_patom = PyArray_DIMS(py_positions)[0];

  dym_get_dipole_dipole(dd, /* [natom, 3, natom, 3, (real, imag)] */
                        dd_q0, /* [natom, 3, 3, (real, imag)] */
                        G_list, /* [num_kvec, 3] */
                        num_G,
                        num_patom,
                        q_vector,
                        q_direction,
                        born,
                        dielectric,
                        pos, /* [natom, 3] */
                        factor, /* 4pi/V*unit-conv */
                        lambda, /* 4 * Lambda^2 */
                        tolerance);

  Py_RETURN_NONE;
}

/* Python wrapper: q=0 part of the dipole-dipole interaction. */
static PyObject * py_get_dipole_dipole_q0(PyObject *self, PyObject *args)
{
  PyArrayObject* py_dd_q0;
  PyArrayObject* py_G_list;
  PyArrayObject* py_born;
  PyArrayObject* py_dielectric;
  PyArrayObject* py_positions;
  double lambda;
  double tolerance;

  double* dd_q0;
  double (*G_list)[3];
  double (*born)[3][3];
  double (*dielectric)[3];
  double (*pos)[3];
  int num_patom, num_G;

  if (!PyArg_ParseTuple(args, "OOOOOdd",
                        &py_dd_q0,
                        &py_G_list,
                        &py_born,
                        &py_dielectric,
                        &py_positions,
                        &lambda,
                        &tolerance))
    return NULL;

  dd_q0 = (double*)PyArray_DATA(py_dd_q0);
  G_list = (double(*)[3])PyArray_DATA(py_G_list);
  born = (double(*)[3][3])PyArray_DATA(py_born);
  dielectric = (double(*)[3])PyArray_DATA(py_dielectric);
  pos = (double(*)[3])PyArray_DATA(py_positions);
  num_G = PyArray_DIMS(py_G_list)[0];
  num_patom = PyArray_DIMS(py_positions)[0];

  dym_get_dipole_dipole_q0(dd_q0, /* [natom, 3, 3, (real, imag)] */
                           G_list, /* [num_kvec, 3] */
                           num_G,
                           num_patom,
                           born,
                           dielectric,
                           pos, /* [natom, 3] */
                           lambda, /* 4 * Lambda^2 */
                           tolerance);

  Py_RETURN_NONE;
}

/* Python wrapper: derivative of the dynamical matrix with respect to q.
   born / dielectric / q_direction are optional (Python None -> NULL). */
static PyObject * py_get_derivative_dynmat(PyObject *self, PyObject *args)
{
  PyArrayObject* derivative_dynmat;
  PyArrayObject* py_force_constants;
  PyArrayObject* r_vector;
  PyArrayObject* lattice;
  PyArrayObject* q_vector;
  PyArrayObject* py_multiplicities;
  PyArrayObject* py_masses;
  PyArrayObject* py_s2p_map;
  PyArrayObject* py_p2s_map;
  PyArrayObject* py_born;
  PyArrayObject* dielectric;
  PyArrayObject* q_direction;
  double nac_factor;

  double* ddm;
  double* fc;
  double* q;
  double* lat;
  double* r;
  double* m;
  int* multi;
  int* s2p_map;
  int* p2s_map;
  int num_patom;
  int num_satom;

  double *z;
  double *epsilon;
  double *q_dir;

  if (!PyArg_ParseTuple(args, "OOOOOOOOOdOOO",
                        &derivative_dynmat,
                        &py_force_constants,
                        &q_vector,
                        &lattice, /* column vectors */
                        &r_vector,
                        &py_multiplicities,
                        &py_masses,
                        &py_s2p_map,
                        &py_p2s_map,
                        &nac_factor,
                        &py_born,
                        &dielectric,
                        &q_direction)) {
    return NULL;
  }

  ddm = (double*)PyArray_DATA(derivative_dynmat);
  fc = (double*)PyArray_DATA(py_force_constants);
  q = (double*)PyArray_DATA(q_vector);
  lat = (double*)PyArray_DATA(lattice);
  r = (double*)PyArray_DATA(r_vector);
  m = (double*)PyArray_DATA(py_masses);
  multi = (int*)PyArray_DATA(py_multiplicities);
  s2p_map = (int*)PyArray_DATA(py_s2p_map);
  p2s_map = (int*)PyArray_DATA(py_p2s_map);
  num_patom = PyArray_DIMS(py_p2s_map)[0];
  num_satom = PyArray_DIMS(py_s2p_map)[0];
  if ((PyObject*)py_born == Py_None) {
    z = NULL;
  } else {
    z = (double*)PyArray_DATA(py_born);
  }
  if ((PyObject*)dielectric == Py_None) {
    epsilon = NULL;
  } else {
    epsilon =
(double*)PyArray_DATA(dielectric);
  }
  if ((PyObject*)q_direction == Py_None) {
    q_dir = NULL;
  } else {
    q_dir = (double*)PyArray_DATA(q_direction);
  }

  get_derivative_dynmat_at_q(ddm,
                             num_patom,
                             num_satom,
                             fc,
                             q,
                             lat,
                             r,
                             multi,
                             m,
                             s2p_map,
                             p2s_map,
                             nac_factor,
                             z,
                             epsilon,
                             q_dir);

  Py_RETURN_NONE;
}

/* Thermal properties */
/* Python wrapper: accumulate weighted free energy, entropy and heat
   capacity over (q-point, band) for every temperature; the three totals
   per temperature are finally divided by the summed q-point weights. */
static PyObject * py_get_thermal_properties(PyObject *self, PyObject *args)
{
  PyArrayObject* py_thermal_props;
  PyArrayObject* py_temperatures;
  PyArrayObject* py_frequencies;
  PyArrayObject* py_weights;

  double *temperatures;
  double* freqs;
  double *thermal_props;
  int* w;
  int num_qpoints;
  int num_bands;
  int num_temp;

  int i, j, k;
  long sum_weights;
  double omega;
  double *tp;

  if (!PyArg_ParseTuple(args, "OOOO",
                        &py_thermal_props,
                        &py_temperatures,
                        &py_frequencies,
                        &py_weights)) {
    return NULL;
  }

  thermal_props = (double*)PyArray_DATA(py_thermal_props);
  temperatures = (double*)PyArray_DATA(py_temperatures);
  num_temp = PyArray_DIMS(py_temperatures)[0];
  freqs = (double*)PyArray_DATA(py_frequencies);
  num_qpoints = PyArray_DIMS(py_frequencies)[0];
  w = (int*)PyArray_DATA(py_weights);
  num_bands = PyArray_DIMS(py_frequencies)[1];

  for (i = 0; i < num_temp * 3; i++) {
    thermal_props[i] = 0;
  }

  /* per-q-point scratch so the parallel loop below needs no reduction;
     NOTE(review): malloc result is not checked before use */
  tp = (double*)malloc(sizeof(double) * num_qpoints * num_temp * 3);
  for (i = 0; i < num_qpoints * num_temp * 3; i++) {
    tp[i] = 0;
  }

#pragma omp parallel for private(j, k, omega)
  for (i = 0; i < num_qpoints; i++){
    for (j = 0; j < num_temp; j++) {
      for (k = 0; k < num_bands; k++){
        omega = freqs[i * num_bands + k];
        /* modes with omega <= 0 and temperatures <= 0 are skipped */
        if (temperatures[j] > 0 && omega > 0.0) {
          tp[i * num_temp * 3 + j * 3] +=
            get_free_energy_omega(temperatures[j], omega) * w[i];
          tp[i * num_temp * 3 + j * 3 + 1] +=
            get_entropy_omega(temperatures[j], omega) * w[i];
          tp[i * num_temp * 3 + j * 3 + 2] +=
            get_heat_capacity_omega(temperatures[j], omega)* w[i];
        }
      }
    }
  }

  /* serial reduction of the per-q-point partial sums */
  for (i = 0; i < num_temp * 3; i++) {
    for (j = 0; j < num_qpoints; j++) {
      thermal_props[i] += tp[j * num_temp * 3 + i];
    }
  }
  free(tp);

  sum_weights = 0;
#pragma omp parallel for reduction(+:sum_weights)
  for (i = 0; i < num_qpoints; i++){
    sum_weights += w[i];
  }
  /* normalize by total weight (assumes sum_weights > 0) */
  for (i = 0; i < num_temp * 3; i++) {
    thermal_props[i] /= sum_weights;
  }

  Py_RETURN_NONE;
}

/* Python wrapper: distribute fc2 rows to every atom in atom_list using the
   precomputed symmetry mappings; validates the mapping-array shapes first. */
static PyObject * py_distribute_fc2_with_mappings(PyObject *self,
                                                  PyObject *args)
{
  PyArrayObject* py_force_constants;
  PyArrayObject* py_permutations;
  PyArrayObject* py_map_atoms;
  PyArrayObject* py_map_syms;
  PyArrayObject* py_atom_list;
  PyArrayObject* py_rotations_cart;

  double (*r_carts)[3][3];
  double (*fc2)[3][3];
  int *permutations;
  int *map_atoms;
  int *map_syms;
  int *atom_list;
  int num_pos, num_rot, len_atom_list;

  if (!PyArg_ParseTuple(args, "OOOOOO",
                        &py_force_constants,
                        &py_atom_list,
                        &py_rotations_cart,
                        &py_permutations,
                        &py_map_atoms,
                        &py_map_syms)) {
    return NULL;
  }

  fc2 = (double(*)[3][3])PyArray_DATA(py_force_constants);
  atom_list = (int*)PyArray_DATA(py_atom_list);
  len_atom_list = PyArray_DIMS(py_atom_list)[0];
  permutations = (int*)PyArray_DATA(py_permutations);
  map_atoms = (int*)PyArray_DATA(py_map_atoms);
  map_syms = (int*)PyArray_DATA(py_map_syms);
  r_carts = (double(*)[3][3])PyArray_DATA(py_rotations_cart);
  num_rot = PyArray_DIMS(py_permutations)[0];
  num_pos = PyArray_DIMS(py_permutations)[1];

  if (PyArray_NDIM(py_map_atoms) != 1 ||
      PyArray_DIMS(py_map_atoms)[0] != num_pos)
  {
    PyErr_SetString(PyExc_ValueError, "wrong shape for map_atoms");
    return NULL;
  }

  if (PyArray_NDIM(py_map_syms) != 1 ||
      PyArray_DIMS(py_map_syms)[0] != num_pos)
  {
    PyErr_SetString(PyExc_ValueError, "wrong shape for map_syms");
    return NULL;
  }

  if (PyArray_DIMS(py_rotations_cart)[0] != num_rot) {
    PyErr_SetString(PyExc_ValueError,
                    "permutations and rotations are different length");
    return NULL;
  }

  distribute_fc2_with_mappings(fc2,
                               atom_list,
                               len_atom_list,
                               r_carts,
                               permutations,
                               map_atoms,
                               map_syms,
                               num_rot,
                               num_pos);

  Py_RETURN_NONE;
}

/* Python wrapper: grid points neighboring grid_point, found by applying
   the relative grid addresses (tetrahedron method). */
static PyObject *py_thm_neighboring_grid_points(PyObject *self, PyObject *args)
{
  PyArrayObject* py_relative_grid_points;
  PyArrayObject* py_relative_grid_address;
  PyArrayObject* py_mesh;
  PyArrayObject* py_bz_grid_address;
  PyArrayObject* py_bz_map;
  int grid_point;

  int* relative_grid_points;
  int (*relative_grid_address)[3];
  int num_relative_grid_address;
  int *mesh;
  int (*bz_grid_address)[3];
  int *bz_map;

  if (!PyArg_ParseTuple(args, "OiOOOO",
                        &py_relative_grid_points,
                        &grid_point,
                        &py_relative_grid_address,
                        &py_mesh,
                        &py_bz_grid_address,
                        &py_bz_map)) {
    return NULL;
  }

  relative_grid_points = (int*)PyArray_DATA(py_relative_grid_points);
  relative_grid_address = (int(*)[3])PyArray_DATA(py_relative_grid_address);
  num_relative_grid_address = PyArray_DIMS(py_relative_grid_address)[0];
  mesh = (int*)PyArray_DATA(py_mesh);
  bz_grid_address = (int(*)[3])PyArray_DATA(py_bz_grid_address);
  bz_map = (int*)PyArray_DATA(py_bz_map);

  thm_get_neighboring_grid_points(relative_grid_points,
                                  grid_point,
                                  relative_grid_address,
                                  num_relative_grid_address,
                                  mesh,
                                  bz_grid_address,
                                  bz_map);
  Py_RETURN_NONE;
}

/* Python wrapper: relative grid addresses of the vertices of the 24
   tetrahedra for the given reciprocal lattice. */
static PyObject * py_thm_relative_grid_address(PyObject *self, PyObject *args)
{
  PyArrayObject* py_relative_grid_address;
  PyArrayObject* py_reciprocal_lattice_py;

  int (*relative_grid_address)[4][3];
  double (*reciprocal_lattice)[3];

  if (!PyArg_ParseTuple(args, "OO",
                        &py_relative_grid_address,
                        &py_reciprocal_lattice_py)) {
    return NULL;
  }

  relative_grid_address = (int(*)[4][3])PyArray_DATA(py_relative_grid_address);
  reciprocal_lattice = (double(*)[3])PyArray_DATA(py_reciprocal_lattice_py);

  thm_get_relative_grid_address(relative_grid_address, reciprocal_lattice);

  Py_RETURN_NONE;
}

/* Python wrapper: all 4 sets of relative grid addresses of the vertices of
   the 24 tetrahedra (no lattice argument needed). */
static PyObject * py_thm_all_relative_grid_address(PyObject *self,
                                                   PyObject *args)
{
  PyArrayObject* py_relative_grid_address;

  int (*relative_grid_address)[24][4][3];

  if (!PyArg_ParseTuple(args, "O", &py_relative_grid_address)) {
    return NULL;
  }

  relative_grid_address =
    (int(*)[24][4][3])PyArray_DATA(py_relative_grid_address);

  thm_get_all_relative_grid_address(relative_grid_address);

  Py_RETURN_NONE;
}

/* Python wrapper: single tetrahedron-method integration weight at omega. */
static PyObject * py_thm_integration_weight(PyObject *self, PyObject *args)
{
  double omega;
PyArrayObject* py_tetrahedra_omegas; char* function; double (*tetrahedra_omegas)[4]; double iw; if (!PyArg_ParseTuple(args, "dOs", &omega, &py_tetrahedra_omegas, &function)) { return NULL; } tetrahedra_omegas = (double(*)[4])PyArray_DATA(py_tetrahedra_omegas); iw = thm_get_integration_weight(omega, tetrahedra_omegas, function[0]); return PyFloat_FromDouble(iw); } static PyObject * py_thm_integration_weight_at_omegas(PyObject *self, PyObject *args) { PyArrayObject* py_integration_weights; PyArrayObject* py_omegas; PyArrayObject* py_tetrahedra_omegas; char* function; double *omegas; double *iw; int num_omegas; double (*tetrahedra_omegas)[4]; if (!PyArg_ParseTuple(args, "OOOs", &py_integration_weights, &py_omegas, &py_tetrahedra_omegas, &function)) { return NULL; } omegas = (double*)PyArray_DATA(py_omegas); iw = (double*)PyArray_DATA(py_integration_weights); num_omegas = (int)PyArray_DIMS(py_omegas)[0]; tetrahedra_omegas = (double(*)[4])PyArray_DATA(py_tetrahedra_omegas); thm_get_integration_weight_at_omegas(iw, num_omegas, omegas, tetrahedra_omegas, function[0]); Py_RETURN_NONE; } static PyObject * py_get_tetrahedra_frequenies(PyObject *self, PyObject *args) { PyArrayObject* py_freq_tetras; PyArrayObject* py_grid_points; PyArrayObject* py_mesh; PyArrayObject* py_grid_address; PyArrayObject* py_gp_ir_index; PyArrayObject* py_relative_grid_address; PyArrayObject* py_frequencies; double* freq_tetras; int* grid_points; int num_gp_in; int* mesh; int (*grid_address)[3]; int* gp_ir_index; int (*relative_grid_address)[3]; double* frequencies; int num_band; int is_shift[3] = {0, 0, 0}; int i, j, k, gp; int g_addr[3]; int address_double[3]; if (!PyArg_ParseTuple(args, "OOOOOOO", &py_freq_tetras, &py_grid_points, &py_mesh, &py_grid_address, &py_gp_ir_index, &py_relative_grid_address, &py_frequencies)) { return NULL; } freq_tetras = (double*)PyArray_DATA(py_freq_tetras); grid_points = (int*)PyArray_DATA(py_grid_points); num_gp_in = (int)PyArray_DIMS(py_grid_points)[0]; mesh = 
(int*)PyArray_DATA(py_mesh); grid_address = (int(*)[3])PyArray_DATA(py_grid_address); gp_ir_index = (int*)PyArray_DATA(py_gp_ir_index); relative_grid_address = (int(*)[3])PyArray_DATA(py_relative_grid_address); frequencies = (double*)PyArray_DATA(py_frequencies); num_band = (int)PyArray_DIMS(py_frequencies)[1]; for (i = 0; i < num_gp_in; i++) { #pragma omp parallel for private(k, g_addr, gp, address_double) for (j = 0; j < num_band * 96; j++) { for (k = 0; k < 3; k++) { g_addr[k] = grid_address[grid_points[i]][k] + relative_grid_address[j % 96][k]; } kgd_get_grid_address_double_mesh(address_double, g_addr, mesh, is_shift); gp = kgd_get_grid_point_double_mesh(address_double, mesh); freq_tetras[i * num_band * 96 + j] = frequencies[gp_ir_index[gp] * num_band + j / 96]; } } Py_RETURN_NONE; } static PyObject * py_tetrahedron_method_dos(PyObject *self, PyObject *args) { PyArrayObject* py_dos; PyArrayObject* py_mesh; PyArrayObject* py_freq_points; PyArrayObject* py_frequencies; PyArrayObject* py_coef; PyArrayObject* py_grid_address; PyArrayObject* py_grid_mapping_table; PyArrayObject* py_relative_grid_address; double *dos; int* mesh; double* freq_points; int num_freq_points; double* frequencies; double* coef; int (*grid_address)[3]; int num_gp; int num_ir_gp; int num_coef; int num_band; int* grid_mapping_table; int (*relative_grid_address)[4][3]; int is_shift[3] = {0, 0, 0}; int i, j, k, l, m, q, r, count; int g_addr[3]; int ir_gps[24][4]; double tetrahedra[24][4]; int address_double[3]; int *gp2ir, *ir_grid_points, *weights; double iw; gp2ir = NULL; ir_grid_points = NULL; weights = NULL; if (!PyArg_ParseTuple(args, "OOOOOOOO", &py_dos, &py_mesh, &py_freq_points, &py_frequencies, &py_coef, &py_grid_address, &py_grid_mapping_table, &py_relative_grid_address)) { return NULL; } /* dos[num_ir_gp][num_band][num_freq_points][num_coef] */ dos = (double*)PyArray_DATA(py_dos); mesh = (int*)PyArray_DATA(py_mesh); freq_points = (double*)PyArray_DATA(py_freq_points); num_freq_points 
= (int)PyArray_DIMS(py_freq_points)[0]; frequencies = (double*)PyArray_DATA(py_frequencies); num_ir_gp = (int)PyArray_DIMS(py_frequencies)[0]; num_band = (int)PyArray_DIMS(py_frequencies)[1]; coef = (double*)PyArray_DATA(py_coef); num_coef = (int)PyArray_DIMS(py_coef)[1]; grid_address = (int(*)[3])PyArray_DATA(py_grid_address); num_gp = (int)PyArray_DIMS(py_grid_address)[0]; grid_mapping_table = (int*)PyArray_DATA(py_grid_mapping_table); relative_grid_address = (int(*)[4][3])PyArray_DATA(py_relative_grid_address); gp2ir = (int*)malloc(sizeof(int) * num_gp); ir_grid_points = (int*)malloc(sizeof(int) * num_ir_gp); weights = (int*)malloc(sizeof(int) * num_ir_gp); count = 0; for (i = 0; i < num_gp; i++) { if (grid_mapping_table[i] == i) { gp2ir[i] = count; ir_grid_points[count] = i; weights[count] = 1; count++; } else { gp2ir[i] = gp2ir[grid_mapping_table[i]]; weights[gp2ir[i]]++; } } if (num_ir_gp != count) { printf("Something is wrong!\n"); } #pragma omp parallel for private(j, k, l, m, q, r, iw, ir_gps, g_addr, tetrahedra, address_double) for (i = 0; i < num_ir_gp; i++) { /* set 24 tetrahedra */ for (l = 0; l < 24; l++) { for (q = 0; q < 4; q++) { for (r = 0; r < 3; r++) { g_addr[r] = grid_address[ir_grid_points[i]][r] + relative_grid_address[l][q][r]; } kgd_get_grid_address_double_mesh(address_double, g_addr, mesh, is_shift); ir_gps[l][q] = gp2ir[kgd_get_grid_point_double_mesh(address_double, mesh)]; } } for (k = 0; k < num_band; k++) { for (l = 0; l < 24; l++) { for (q = 0; q < 4; q++) { tetrahedra[l][q] = frequencies[ir_gps[l][q] * num_band + k]; } } for (j = 0; j < num_freq_points; j++) { iw = thm_get_integration_weight(freq_points[j], tetrahedra, 'I') * weights[i]; for (m = 0; m < num_coef; m++) { dos[i * num_band * num_freq_points * num_coef + k * num_coef * num_freq_points + j * num_coef + m] += iw * coef[i * num_coef * num_band + m * num_band + k]; } } } } free(gp2ir); gp2ir = NULL; free(ir_grid_points); ir_grid_points = NULL; free(weights); weights = NULL; 
Py_RETURN_NONE; } static double get_free_energy_omega(const double temperature, const double omega) { /* temperature is defined by T (K) */ /* omega must be normalized to eV. */ return KB * temperature * log(1 - exp(- omega / (KB * temperature))); } static double get_entropy_omega(const double temperature, const double omega) { /* temperature is defined by T (K) */ /* omega must be normalized to eV. */ double val; val = omega / (2 * KB * temperature); return 1 / (2 * temperature) * omega * cosh(val) / sinh(val) - KB * log(2 * sinh(val)); } static double get_heat_capacity_omega(const double temperature, const double omega) { /* temperature is defined by T (K) */ /* omega must be normalized to eV. */ /* If val is close to 1. Then expansion is used. */ double val, val1, val2; val = omega / (KB * temperature); val1 = exp(val); val2 = (val) / (val1 - 1); return KB * val1 * val2 * val2; } /* static double get_energy_omega(double temperature, double omega){ */ /* /\* temperature is defined by T (K) *\/ */ /* /\* omega must be normalized to eV. *\/ */ /* return omega / (exp(omega / (KB * temperature)) - 1); */ /* } */ static int compute_permutation(int * rot_atom, PHPYCONST double lat[3][3], PHPYCONST double (*pos)[3], PHPYCONST double (*rot_pos)[3], const int num_pos, const double symprec) { int i,j,k,l; int search_start; double distance2, diff_cart; double diff[3]; for (i = 0; i < num_pos; i++) { rot_atom[i] = -1; } /* optimization: Iterate primarily by pos instead of rot_pos. */ /* (find where 0 belongs in rot_atom, then where 1 belongs, etc.) */ /* Then track the first unassigned index. */ /* */ /* This works best if the permutation is close to the identity. 
*/ /* (more specifically, if the max value of 'rot_atom[i] - i' is small) */ search_start = 0; for (i = 0; i < num_pos; i++) { while (rot_atom[search_start] >= 0) { search_start++; } for (j = search_start; j < num_pos; j++) { if (rot_atom[j] >= 0) { continue; } for (k = 0; k < 3; k++) { diff[k] = pos[i][k] - rot_pos[j][k]; diff[k] -= nint(diff[k]); } distance2 = 0; for (k = 0; k < 3; k++) { diff_cart = 0; for (l = 0; l < 3; l++) { diff_cart += lat[k][l] * diff[l]; } distance2 += diff_cart * diff_cart; } if (sqrt(distance2) < symprec) { rot_atom[j] = i; break; } } } for (i = 0; i < num_pos; i++) { if (rot_atom[i] < 0) { printf("Encounter some problem in compute_permutation.\n"); return 0; } } return 1; } /* Implementation detail of get_smallest_vectors. */ /* Finds the smallest vectors within each list and copies them to the output. */ static void gsv_copy_smallest_vectors(double (*shortest_vectors)[27][3], int * multiplicity, PHPYCONST double (*vector_lists)[27][3], PHPYCONST double (*length_lists)[27], const int num_lists, const double symprec) { int i,j,k; int count; double minimum; double (*vectors)[3]; double * lengths; for (i = 0; i < num_lists; i++) { /* Look at a single list of 27 vectors. */ lengths = length_lists[i]; vectors = vector_lists[i]; /* Compute the minimum length. */ minimum = DBL_MAX; for (j = 0; j < 27; j++) { if (lengths[j] < minimum) { minimum = lengths[j]; } } /* Copy vectors whose length is within tolerance. */ count = 0; for (j = 0; j < 27; j++) { if (lengths[j] - minimum <= symprec) { for (k = 0; k < 3; k++) { shortest_vectors[i][count][k] = vectors[j][k]; } count++; } } multiplicity[i] = count; } } /* Distributes all force constants using precomputed data about symmetry mappings. 
*/ static void distribute_fc2_with_mappings(double (*fc2)[3][3], /* shape[num_pos][num_pos] */ const int * atom_list, const int len_atom_list, PHPYCONST double (*r_carts)[3][3], /* shape[num_rot] */ const int * permutations, /* shape[num_rot][num_pos] */ const int * map_atoms, /* shape [num_pos] */ const int * map_syms, /* shape [num_pos] */ const int num_rot, const int num_pos) { int i, j, k, l, m; int atom_todo, atom_done, atom_other; int sym_index; double (*fc2_done)[3]; double (*fc2_todo)[3]; double (*r_cart)[3]; const int * permutation; for (i = 0; i < len_atom_list; i++) { /* look up how this atom maps into the done list. */ atom_todo = atom_list[i]; atom_done = map_atoms[atom_todo]; sym_index = map_syms[atom_todo]; /* skip the atoms in the done list, */ /* which are easily identified because they map to themselves. */ if (atom_todo == atom_done) { continue; } /* look up information about the rotation */ r_cart = r_carts[sym_index]; permutation = &permutations[sym_index * num_pos]; /* shape[num_pos] */ /* distribute terms from atom_done to atom_todo */ for (atom_other = 0; atom_other < num_pos; atom_other++) { fc2_done = fc2[atom_done * num_pos + permutation[atom_other]]; fc2_todo = fc2[atom_todo * num_pos + atom_other]; for (j = 0; j < 3; j++) { for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { for (m = 0; m < 3; m++) { /* P' = R^-1 P R */ fc2_todo[j][k] += r_cart[l][j] * r_cart[m][k] * fc2_done[l][m]; } } } } } } } static void set_index_permutation_symmetry_fc(double * fc, const int natom) { int i, j, k, l, m, n; for (i = 0; i < natom; i++) { /* non diagonal part */ for (j = i + 1; j < natom; j++) { for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { m = i * natom * 9 + j * 9 + k * 3 + l; n = j * natom * 9 + i * 9 + l * 3 + k; fc[m] += fc[n]; fc[m] /= 2; fc[n] = fc[m]; } } } /* diagnoal part */ for (k = 0; k < 2; k++) { for (l = k + 1; l < 3; l++) { m = i * natom * 9 + i * 9 + k * 3 + l; n = i * natom * 9 + i * 9 + l * 3 + k; fc[m] += fc[n]; fc[m] /= 2; 
fc[n] = fc[m]; } } } } static void set_translational_symmetry_fc(double * fc, const int natom) { int i, j, k, l, m; double sums[3][3]; for (i = 0; i < natom; i++) { for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { sums[k][l] = 0; m = i * natom * 9 + k * 3 + l; for (j = 0; j < natom; j++) { if (i != j) { sums[k][l] += fc[m]; } m += 9; } } } for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { fc[i * natom * 9 + i * 9 + k * 3 + l] = -(sums[k][l] + sums[l][k]) / 2; } } } } static void set_index_permutation_symmetry_compact_fc(double * fc, const int p2s[], const int s2pp[], const int nsym_list[], const int perms[], const int n_satom, const int n_patom, const int is_transpose) { int i, j, k, l, m, n, i_p, j_p, i_trans; double fc_elem; char *done; done = NULL; done = (char*)malloc(sizeof(char) * n_satom * n_patom); for (i = 0; i < n_satom * n_patom; i++) { done[i] = 0; } for (j = 0; j < n_satom; j++) { j_p = s2pp[j]; for (i_p = 0; i_p < n_patom; i_p++) { i = p2s[i_p]; if (i == j) { /* diagnoal part */ for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { if (l > k) { m = i_p * n_satom * 9 + i * 9 + k * 3 + l; n = i_p * n_satom * 9 + i * 9 + l * 3 + k; if (is_transpose) { fc_elem = fc[m]; fc[m] = fc[n]; fc[n] = fc_elem; } else { fc[m] = (fc[m] + fc[n]) / 2; fc[n] = fc[m]; } } } } } if (!done[i_p * n_satom + j]) { /* (j, i) -- nsym_list[j] --> (j', i') */ /* nsym_list[j] translates j to j' where j' is in */ /* primitive cell. The same translation sends i to i' */ /* where i' is not necessarily to be in primitive cell. 
*/ /* Thus, i' = perms[nsym_list[j] * n_satom + i] */ i_trans = perms[nsym_list[j] * n_satom + i]; done[i_p * n_satom + j] = 1; done[j_p * n_satom + i_trans] = 1; for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { m = i_p * n_satom * 9 + j * 9 + k * 3 + l; n = j_p * n_satom * 9 + i_trans * 9 + l * 3 + k; if (is_transpose) { fc_elem = fc[m]; fc[m] = fc[n]; fc[n] = fc_elem; } else { fc[m] = (fc[n] + fc[m]) / 2; fc[n] = fc[m]; } } } } } } free(done); done = NULL; } static void set_translational_symmetry_compact_fc(double * fc, const int p2s[], const int n_satom, const int n_patom) { int j, k, l, m, i_p; double sums[3][3]; for (i_p = 0; i_p < n_patom; i_p++) { for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { sums[k][l] = 0; m = i_p * n_satom * 9 + k * 3 + l; for (j = 0; j < n_satom; j++) { if (p2s[i_p] != j) { sums[k][l] += fc[m]; } m += 9; } } } for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { fc[i_p * n_satom * 9 + p2s[i_p] * 9 + k * 3 + l] = -(sums[k][l] + sums[l][k]) / 2; } } } } static int nint(const double a) { if (a < 0.0) return (int) (a - 0.5); else return (int) (a + 0.5); }
omp_sum_strnum_tls.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <omp.h> #include <unistd.h> #include "stdalsp.h" #define LEN_SUM_STR 16 char *sum_strnum(const char *, const char *); int main() { omp_set_num_threads(4); { #pragma omp parallel sections { #pragma omp section { char *x = "1", *y = "3"; char *ret_str = sum_strnum(x, y); pr_out("[T:%d] %s + %s = %s (%p)", omp_get_thread_num(), x, y, ret_str, ret_str); sleep(1); } #pragma omp section { char *x = "4", *y = "4"; char *ret_str = sum_strnum(x, y); pr_out("[T:%d] %s + %s = %s (%p)", omp_get_thread_num(), x, y, ret_str, ret_str); sleep(1); } #pragma omp section { char *x = "1", *y = "5"; char *ret_str = sum_strnum(x, y); pr_out("[T:%d] %s + %s = %s (%p)", omp_get_thread_num(), x, y, ret_str, ret_str); sleep(1); } } } return 0; } char *sum_strnum(const char *s1, const char *s2) { static char tls_str[LEN_SUM_STR]; #pragma omp threadprivate(tls_str) snprintf(tls_str, LEN_SUM_STR, "%d", atoi(s1) + atoi(s2)); return tls_str; }
test-double-libmvec-sincos-main.c
/* Test for vector sincos ABI.
   Copyright (C) 2016-2020 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <math.h>

#define N 1000

double x[N], s[N], c[N];
double* s_ptrs[N];
double* c_ptrs[N];

/* Drive sincos through an OpenMP SIMD loop so the compiler may emit the
   vectorized libmvec variant; results land in s[]/c[] via pointer arrays. */
int
test_sincos_abi (void)
{
  int i;

  for (i = 0; i < N; i++)
    {
      x[i] = i / 3;              /* integer division: plateaus of equal args */
      s_ptrs[i] = &s[i];
      c_ptrs[i] = &c[i];
    }

#pragma omp simd
  for (i = 0; i < N; i++)
    sincos (x[i], s_ptrs[i], c_ptrs[i]);

  return 0;
}
fasta.c
// The Computer Language Benchmarks Game
// http://benchmarksgame.alioth.debian.org/
//
// contributed by Jeremy Zerfas
// rewritten by Аноним Легионов, inspired by fasta Rust #2 program
// use two OpenMP locks instead of one critical section
// decouples IO activity from random number generation
//
// modified by Josh Goldfoot, adding use of a buffer for fasta_repeat

// This controls the width of lines that are output by this program.
#define MAXIMUM_LINE_WIDTH 60

// This program will generate the random nucleotide sequences in parallel which
// are worked on in blocks of lines. The number of lines in those blocks is
// controlled by this setting.
#define LINES_PER_BLOCK 1024
#define CHARACTERS_PER_BLOCK (MAXIMUM_LINE_WIDTH*LINES_PER_BLOCK)
#define THREADS_TO_USE 4

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif

// intptr_t should be the native integer type on most sane systems.
typedef intptr_t intnative_t;

typedef struct {
    char letter;
    float probability;
} nucleotide_info;

// Build one reusable output buffer holding string_To_Repeat repeated for
// strlen(string_To_Repeat)*MAXIMUM_LINE_WIDTH characters, wrapped to
// MAXIMUM_LINE_WIDTH columns (that length is exactly one full repeat period
// of pattern-vs-line-breaks).  Caller frees the returned buffer.
static char* make_Sequence_Buffer(const char string_To_Repeat[]) {
    const intnative_t string_To_Repeat_Length = strlen(string_To_Repeat);

    // JG: Changed to that this writes a sequence to a buffer, which is used
    intnative_t number_Of_Characters_To_Create =
        string_To_Repeat_Length * MAXIMUM_LINE_WIDTH;
    char* buffer = (char*) malloc(number_Of_Characters_To_Create +
        number_Of_Characters_To_Create / MAXIMUM_LINE_WIDTH + 1);
    if (buffer == NULL)
        exit(-1);
    char* bufferOffset = buffer;

    // Create an extended_String_To_Repeat which is a copy of string_To_Repeat
    // but extended with another copy of the first MAXIMUM_LINE_WIDTH characters
    // of string_To_Repeat appended to the end. Later on this allows us to
    // generate a line of output just by doing simple memory copies using an
    // appropriate offset into extended_String_To_Repeat.
    char extended_String_To_Repeat[string_To_Repeat_Length + MAXIMUM_LINE_WIDTH];
    for (intnative_t column = 0;
         column < string_To_Repeat_Length + MAXIMUM_LINE_WIDTH; column++)
        extended_String_To_Repeat[column] =
            string_To_Repeat[column % string_To_Repeat_Length];
    intnative_t offset = 0;

    char line[MAXIMUM_LINE_WIDTH + 1];
    line[MAXIMUM_LINE_WIDTH] = '\n';

    for (intnative_t current_Number_Of_Characters_To_Create =
             number_Of_Characters_To_Create;
         current_Number_Of_Characters_To_Create > 0;) {
        // Figure out the length of the line we need to write. If it's less
        // than MAXIMUM_LINE_WIDTH then we also need to add a line feed in the
        // right spot too.
        intnative_t line_Length = MAXIMUM_LINE_WIDTH;
        if (current_Number_Of_Characters_To_Create < MAXIMUM_LINE_WIDTH) {
            line_Length = current_Number_Of_Characters_To_Create;
            line[line_Length] = '\n';
        }
        memcpy(line, extended_String_To_Repeat + offset, line_Length);

        // Update the offset, reducing it by string_To_Repeat_Length if
        // necessary.
        offset += line_Length;
        if (offset > string_To_Repeat_Length)
            offset -= string_To_Repeat_Length;

        // "Output" that line to our buffer and update the
        // current_Number_Of_Characters_To_Create.
        memcpy(bufferOffset, line, line_Length + 1);
        // JG: used to be fwrite(line, line_Length + 1, 1, stdout);
        bufferOffset += line_Length + 1;
        current_Number_Of_Characters_To_Create -= line_Length;
    }
    *bufferOffset = 0;
    return buffer;
}

// Repeatedly print string_To_Repeat until number_Of_Characters_To_Create
// characters (plus newlines) have been written, wrapped to
// MAXIMUM_LINE_WIDTH columns.
void repeat_And_Wrap_String(const char string_To_Repeat[],
                            intnative_t number_Of_Characters_To_Create) {
    /* JG: fasta_repeat repeats every len(alu) * line-length = 287 * 61 =
       17507 characters. So, calculate this once, then just print that
       buffer over and over. */
    char* sequence = make_Sequence_Buffer(string_To_Repeat);
    intnative_t sequenceLen = (intnative_t) strlen(sequence);
    intnative_t outputBytes =
        number_Of_Characters_To_Create + number_Of_Characters_To_Create / 60;
    while (outputBytes >= sequenceLen) {
        fwrite(sequence, sequenceLen, 1, stdout);
        outputBytes -= sequenceLen;
    }
    if (outputBytes > 0) {
        fwrite(sequence, outputBytes, 1, stdout);
        printf("\n");
    }
    free(sequence);
}

// Generate a pseudorandom number from 0 to max using a linear
// congruential generator.
#define IM 139968
#define IA 3877
#define IC 29573

uint32_t seed = 42;

int rng_tid;             // Thread ID whose turn it is to draw numbers
int rng_tnum = 1;        // Thread number
intnative_t rng_cnt = 0; // Characters still to be generated

#ifdef _OPENMP
omp_lock_t rng_lock;
#define RNG_LOCK_INIT() omp_init_lock(&rng_lock)
#define RNG_LOCK()      omp_set_lock(&rng_lock)
#define RNG_FREE()      omp_unset_lock(&rng_lock)
#else
#define RNG_LOCK_INIT() do{}while(0)
#define RNG_LOCK()      do{}while(0)
#define RNG_FREE()      do{}while(0)
#endif

static void rng_init(void) {
    RNG_LOCK_INIT();
    rng_tid = 0;
}

// Draw up to len LCG values into buf, but only when it is curr_tid's turn;
// returns -1 when it is another thread's turn, otherwise the count drawn
// (0 once the global budget rng_cnt is exhausted).
static intnative_t rng_gen_blk(uint32_t * buf, intnative_t len, int curr_tid) {
    intnative_t gen_cnt = -1; // Error by default
    RNG_LOCK();
    if (rng_tid == curr_tid) {
        if (++rng_tid >= rng_tnum) {
            rng_tid = 0;
        }
        gen_cnt = (len < rng_cnt) ? len : rng_cnt;
        rng_cnt -= gen_cnt;
        len = gen_cnt;
        while (0 != len--) {
            seed = (seed*IA + IC) % IM;
            *(buf++) = seed; // This is stupid actually!
}           /* end of per-batch LCG loop (head of rng_gen_blk is above) */
    }
    RNG_FREE();
    return gen_cnt;  /* -1: not our turn; else number of values generated */
}

int out_tid;       // Thread ID whose turn it is to write output
int out_tnum = 1;  // Thread number

#ifdef _OPENMP
omp_lock_t out_lock;
#define OUT_LOCK_INIT() omp_init_lock(&out_lock)
#define OUT_LOCK()      omp_set_lock(&out_lock)
#define OUT_FREE()      omp_unset_lock(&out_lock)
#else
#define OUT_LOCK_INIT() do{}while(0)
#define OUT_LOCK()      do{}while(0)
#define OUT_FREE()      do{}while(0)
#endif

static void out_init(void) {
    OUT_LOCK_INIT();
    /* BUG FIX: this used to reset rng_tid (copy-paste from rng_init),
       leaving out_tid untouched.  It was masked only because
       generate_And_Wrap_Pseudorandom_DNA_Sequence re-zeroes both. */
    out_tid = 0;
}

// Write len bytes of buf to stdout, but only when it is curr_tid's turn.
// Returns -1 when it is another thread's turn, 0 on IO error, 1 on success.
static intnative_t out_write(char * buf, intnative_t len, int curr_tid) {
    intnative_t wr_cnt = -1; // Error by default
    OUT_LOCK();
    if (out_tid == curr_tid) {
        if (++out_tid >= out_tnum) {
            out_tid = 0;
        }
        wr_cnt = fwrite(buf, len, 1, stdout);
    }
    OUT_FREE();
    return wr_cnt; // -1 - thread error, 0 - IO error, 1 - OK
}

// Produce char_num pseudorandom nucleotides wrapped at MAXIMUM_LINE_WIDTH.
// RNG draws and output writes are serialized per-thread via the two locks,
// so blocks come out in deterministic round-robin order.
static void generate_And_Wrap_Pseudorandom_DNA_Sequence(
        const nucleotide_info nucl_info[],
        const intnative_t nucl_num,
        const intnative_t char_num) {

    // Cumulative probabilities scaled to the LCG modulus; the +1 compensates
    // rounding errors on the reference output.
    uint32_t cumul_p[nucl_num];
    float cumul_acc = 0.0;
    for (intnative_t i = 0; i < nucl_num; i++) {
        cumul_acc += nucl_info[i].probability;
        cumul_p[i] = 1ul + (uint32_t)(cumul_acc*(float)IM);
    }

#ifdef _OPENMP
    intnative_t tnum = omp_get_num_procs();
    if (tnum > THREADS_TO_USE)
        tnum = THREADS_TO_USE;
    omp_set_num_threads(tnum);
    rng_tnum = tnum;
    out_tnum = tnum;
#endif
    rng_tid = 0;
    out_tid = 0;
    rng_cnt = char_num;

#pragma omp parallel
    {
        char block[CHARACTERS_PER_BLOCK + LINES_PER_BLOCK];
        char * line;
        uint32_t rnd[CHARACTERS_PER_BLOCK], r;
        intnative_t cnt, col, prid, nid, ncnt;
        int cur_tid;

#ifdef _OPENMP
        cur_tid = omp_get_thread_num();
#else
        cur_tid = 0;
#endif
        while (1) {
            // Spin until it is our turn to draw a block of random values.
            do {
                cnt = rng_gen_blk(rnd, CHARACTERS_PER_BLOCK, cur_tid);
            } while (-1 == cnt);
            if (0 == cnt) {
                break; // Work finished!
            }

            // Translate random values to letters: count how many cumulative
            // thresholds lie at or below r.
            line = block;
            for (col = 0, prid = 0; prid < cnt; prid++) {
                r = rnd[prid];
                ncnt = 0;
                for (nid = 0; nid < nucl_num; nid++) {
                    if (cumul_p[nid] <= r) {
                        ncnt++;
                    }
                }
                *line++ = nucl_info[ncnt].letter;
                if (++col >= MAXIMUM_LINE_WIDTH) {
                    col = 0;
                    *line++ = '\n';
                }
            }
            // Check if we need to end the line
            if (0 != col) {
                // Last iteration didn't end the line, so finish the job.
                *line++ = '\n';
            }

            // Print results (spin until it is our turn to write).
            do {
                cnt = out_write(block, line - block, cur_tid);
            } while (-1 == cnt);
            // Check for IO error
            if (0 == cnt) {
                exit(1);
            }
        }
    }
}

int main(int argc, char ** argv) {
    const intnative_t n = atoi(argv[1]);

    fputs(">ONE Homo sapiens alu\n", stdout);
    const char homo_Sapiens_Alu[] =
        "GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTC"
        "AGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCG"
        "TGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGG"
        "AGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAA";
    repeat_And_Wrap_String(homo_Sapiens_Alu, 2 * n);

    rng_init();
    out_init();

    fputs(">TWO IUB ambiguity codes\n", stdout);
    nucleotide_info iub_Nucleotides_Information[] = {
        { 'a', 0.27 },{ 'c', 0.12 },{ 'g', 0.12 },{ 't', 0.27 },{ 'B', 0.02 },
        { 'D', 0.02 },{ 'H', 0.02 },{ 'K', 0.02 },{ 'M', 0.02 },{ 'N', 0.02 },
        { 'R', 0.02 },{ 'S', 0.02 },{ 'V', 0.02 },{ 'W', 0.02 },{ 'Y', 0.02 } };
    generate_And_Wrap_Pseudorandom_DNA_Sequence(iub_Nucleotides_Information,
        sizeof(iub_Nucleotides_Information) / sizeof(nucleotide_info), 3 * n);

    fputs(">THREE Homo sapiens frequency\n", stdout);
    nucleotide_info homo_Sapien_Nucleotides_Information[] = {
        { 'a', 0.3029549426680 },{ 'c', 0.1979883004921 },
        { 'g', 0.1975473066391 },{ 't', 0.3015094502008 } };
    generate_And_Wrap_Pseudorandom_DNA_Sequence(homo_Sapien_Nucleotides_Information,
        sizeof(homo_Sapien_Nucleotides_Information) / sizeof(nucleotide_info), 5 * n);

    return 0;
}

/* notes, command-line, and program output
NOTES:
64-bit Ubuntu quad core
gcc (Ubuntu 6.3.0-12ubuntu2) 6.3.0
20170406 Fri, 14 Apr 2017 17:25:19 GMT MAKE: /usr/bin/gcc -pipe -Wall -O3 -fomit-frame-pointer -march=native -std=c99 -mfpmath=sse -msse3 -fopenmp fasta.gcc-2.c -o fasta.gcc-2.gcc_run rm fasta.gcc-2.c 0.20s to complete and log all make actions COMMAND LINE: ./fasta.gcc-2.gcc_run 25000000 (TRUNCATED) PROGRAM OUTPUT: >ONE Homo sapiens alu GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGA TCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACT AAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAG GCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCG CCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGT GGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCA GGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAA TTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAG AATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCA GCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGT AATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACC AGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTG GTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACC CGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAG AGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTT TGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACA TGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCT GTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGG TTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGT CTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGG CGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCG TCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTA CTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCG AGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCG GGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACC TGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAA TACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGA 
GGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACT GCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTC ACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGT TCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGC CGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCG CTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTG GGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCC CAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCT GGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGC GCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGA GGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGA GACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGA GGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTG AAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAAT CCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCA GTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAA AAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGC GGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCT ACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGG GAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATC GCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGC GGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGG TCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAA AAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAG GAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACT CCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCC TGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAG ACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGC GTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGA ACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGA CAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCA CTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCA 
ACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCG CCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGG AGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTC CGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCG AGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACC CCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAG CTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAG CCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGG CCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATC ACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAA AAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGC TGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCC ACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGG CTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGG AGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATT AGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAA TCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGC CTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAA TCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAG CCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGT GGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCG GGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAG CGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTG GGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATG GTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGT AATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTT GCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCT CAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCG GGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTC TCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACT CGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAG ATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGG 
CGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTG AGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATA CAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGG CAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGC ACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCAC GCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTC GAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCG GGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCT TGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGG CGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCA GCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGG CCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGC GCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGG CGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGA CTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGG CCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAA ACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCC CAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGT GAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAA AGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGG ATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTAC TAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGA GGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGC GCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGG TGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTC AGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAA ATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGA GAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCC AGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTG TAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGAC CAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGT GGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAAC 
CCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACA GAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACT TTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAAC ATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCC TGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAG GTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCG TCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAG GCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCC GTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCT ACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCC GAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCC GGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCAC CTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAA ATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTG AGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCAC TGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCT CACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAG TTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAG CCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATC GCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCT GGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATC CCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCC TGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGG CGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGG AGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCG AGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGG AGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGT GAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAA TCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGC AGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCA AAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGG CGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTC 
TACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCG GGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGAT CGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCG CGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAG GTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACA AAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCA GGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCAC TCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGC CTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGA GACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGG CGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTG AACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCG ACAGAGCGAGACTCCGTCTCAAAAAGGCCGG */
omp-loop-carried-depend.c
/***************************************************************************** Example : omp-loop-carried-depend.c Objective : Write in OpenMP progam for Loop-carried dependence using OpenMP parallel Directive . Input : a) Number of threads b) Size of Array Output : Status of the computation i.e the comparitive results of the serial and parallel computation. Author : RarchK *********************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> #include <sys/time.h> int main(int argc,char **argv){ const double up = 1.1 ; double Sn, origSn=1000.0; double *opt,*dependency_opt,*no_dependency_opt; int n,Noofthreads,N; printf("\n\t\t---------------------------------------------------------------------------"); printf("\n\t\t Email : RarchK"); printf("\n\t\t---------------------------------------------------------------------------"); printf("\n\t\t Objective : OpenMP Program to demonstrate Loop-Carried Dependency "); printf("\n\t\t..........................................................................\n"); if( argc != 3 ){ printf("\t\t Very Few Arguments\n "); printf("\t\t Syntax : exec <Threads> <array-size>\n"); exit(-1); } N = atoi(argv[2]); Noofthreads=atoi(argv[1]); if ((Noofthreads!=1) && (Noofthreads!=2) && (Noofthreads!=4) && (Noofthreads!=8) && (Noofthreads!= 16) ) { printf("\n Number of threads should be 1,2,4,8 or 16 for the execution of program. \n\n"); exit(-1); } /* printf("\n\t\t Read The Array Size \n"); scanf("%d",&N);*/ printf("\n\t\t Threads : %d ",Noofthreads); printf("\n\t\t Array Size : %d \n ",N); opt = (double *)malloc( (N+1) *sizeof(double)); dependency_opt = (double *)malloc( (N+1) *sizeof(double)); no_dependency_opt = (double *)malloc( (N+1) *sizeof(double)); /* Serial Computation in which the for loop contains Loop-Carried Depedency.These dependencies are so named because variables depend on previous iterations within a loop. 
To parallelize the loop, the dependency must be resolved . */ Sn = 1000.0; for (n=0; n<=N; ++n) { opt[n] = Sn; Sn *= up; } //for (n=0; n<=N; ++n) // printf("opt[%d]= %d\n",n,opt[n]); Sn = 1000.0; /* The above for Loop that is existing with Loop-Carried Dependency is parallelised */ omp_set_num_threads(Noofthreads); #pragma omp parallel for private(n) for (n=0; n<=N; ++n) { dependency_opt[n] = Sn; Sn *= up; } for (n=0; n<=N; ++n) { if ( opt[n] == dependency_opt[n]){} else{ printf("\n\t\tIncorrect results found when Serial computation results & Parallel computation\n\t\twith Dependency results are compared\n"); break; } } Sn = 1000.0; no_dependency_opt[0] = origSn; /*The For Loop is parallelised after resolving the dependency by pushing out the update variable Sn*/ omp_set_num_threads(Noofthreads); #pragma omp parallel for private(n) lastprivate(Sn) for (n=1; n<=N; ++n) { Sn = origSn * pow(up, n); no_dependency_opt[n] = Sn; } Sn *= up; for (n=0; n<=N; ++n) { if ( (int)(opt[n]) == (int)(no_dependency_opt[n])){} else{ printf("%lf != %lf\n",opt[n],no_dependency_opt[n]); printf("\n\t\t Incorrect results found when Serial computation results & Parallel computation \n\t\twithout Dependency results are compared\n"); break; } } //for (n=0; n<=N; ++n) //printf("no_opt[%d]= %d\n",n,no_dependency_opt[n]); printf("\n\t\t..........................................................................\n"); free(opt); free(dependency_opt); free(no_dependency_opt); }
DRB092-threadprivatemissing2-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* A file-scope variable used within a function called by a parallel region. No threadprivate is used to avoid data races. This is the case for a variable referenced within a construct. Data race pairs sum0@68:7 vs. sum0@68:12 sum0@68:7 vs. sum0@68:7 */ #include <stdio.h> #include <assert.h> int sum0=0, sum1=0; //#pragma omp threadprivate(sum0) int main() { int i, sum=0; #pragma omp parallel { #pragma omp for private(i) reduction(+:sum0) for (i=1;i<=1000;i++) { sum0=sum0+i; } } sum= sum+sum0; /* reference calculation */ #pragma omp parallel for private(i) reduction(+:sum1) for (i=1;i<=1000;i++) { sum1=sum1+i; } printf("sum=%d; sum1=%d\n",sum,sum1); // assert(sum==sum1); return 0; }
kernel.h
/*
 * This file contains the implementation of a kernel for the
 * point-in-polygon problem using the crossing number algorithm
 *
 * The kernel pnpoly_base is used for correctness checking.
 *
 * The algorithm used here is adapted from:
 * 'Inclusion of a Point in a Polygon', Dan Sunday, 2001
 * (http://geomalgorithms.com/a03-_inclusion.html)
 *
 * Author: Ben van Werkhoven <b.vanwerkhoven@esciencecenter.nl>
 */

/*
 * The is_between method returns a boolean that is True when the a is between c and b.
 */
#pragma omp declare target
inline int is_between(float a, float b, float c) {
    // The two comparisons differ exactly when b and c lie on opposite
    // sides of a, i.e. a is strictly between them (in either order).
    return (b > a) != (c > a);
}
#pragma omp end declare target

/*
 * The Point-in-Polygon kernel
 *
 * For each input point, counts how many polygon edges a ray in the
 * positive x-direction crosses; odd parity means "inside".
 * Each loop iteration processes tile_size points at stride BLOCK_SIZE_X.
 */
template <int tile_size>
void pnpoly_opt(
        int*__restrict bitmap,
        const float2*__restrict point,
        const float2*__restrict vertex,
        int n)
{
  #pragma omp target teams distribute parallel for thread_limit(256)
  for (int i = 0; i < n; i++) {
    // Per-iteration tile: crossing parity and a cached copy of each point.
    int c[tile_size];
    float2 lpoint[tile_size];

    #pragma unroll
    for (int ti=0; ti<tile_size; ti++) {
      c[ti] = 0;
      // Guard the tail when i + BLOCK_SIZE_X*ti runs past the last point.
      if (i+BLOCK_SIZE_X*ti < n) {
        lpoint[ti] = point[i+BLOCK_SIZE_X*ti];
      }
    }

    // NOTE(review): i itself iterates over [0, n) while the tile indexing is
    // i + BLOCK_SIZE_X*ti -- this looks ported from a CUDA grid where i was a
    // thread index; confirm points are not processed by more than one tile.

    int k = VERTICES-1;

    for (int j=0; j<VERTICES; k = j++) {
      // edge from vj to vk
      float2 vj = vertex[j];
      float2 vk = vertex[k];

      // Inverse slope of the edge; only consumed when is_between() holds,
      // in which case vj.y != vk.y, so the division cannot be by zero there.
      float slope = (vk.x-vj.x) / (vk.y-vj.y);

      #pragma unroll
      for (int ti=0; ti<tile_size; ti++) {
        float2 p = lpoint[ti];
        if (is_between(p.y, vj.y, vk.y) &&         //if p is between vj and vk vertically
                (p.x < slope * (p.y-vj.y) + vj.x) ) { //if p.x crosses the line vj-vk when moved in positive x-direction
          c[ti] = !c[ti];   // toggle crossing parity
        }
      }
    }

    #pragma unroll
    for (int ti=0; ti<tile_size; ti++) {
      //could do an if statement here if 1s are expected to be rare
      if (i+BLOCK_SIZE_X*ti < n)
        bitmap[i+BLOCK_SIZE_X*ti] = c[ti];
    }
  }
}

/*
 * The naive implementation is used for verifying correctness of the optimized implementation
 * Same crossing-number algorithm, one point per iteration, no tiling.
 */
void pnpoly_base(
        int*__restrict bitmap,
        const float2*__restrict point,
        const float2*__restrict vertex,
        int n)
{
  #pragma omp target teams distribute parallel for thread_limit(256)
  for (int i = 0; i < n; i++) {
    int c = 0;
    float2 p = point[i];

    int k = VERTICES-1;

    for (int j=0; j<VERTICES; k = j++) {
      // edge from v to vp
      float2 vj = vertex[j];
      float2 vk = vertex[k];
      // Guarded by the straddle test below, as in pnpoly_opt.
      float slope = (vk.x-vj.x) / (vk.y-vj.y);

      if (((vj.y>p.y) != (vk.y>p.y)) &&            //if p is between vj and vk vertically
              (p.x < slope * (p.y-vj.y) + vj.x)) { //if p.x crosses the line vj-vk when moved in positive x-direction
        c = !c;
      }
    }
    bitmap[i] = c; // 0 if even (out), and 1 if odd (in)
  }
}
parallel.c
#include <stdbool.h> #include <stdio.h> #include <math.h> #include "mpi.h" #include "ppp/ppp.h" #include "ppp_pnm/ppp_pnm.h" /** * Computes the force body j exercises on body i. * The acceleration is returned in *ax and *ay. */ static void compute(body* bodies, int i, int j, long double* ax, long double* ay) { long double aij_x, aij_y, dx, dy, r3; dx = bodies[j].x - bodies[i].x; dy = bodies[j].y - bodies[i].y; r3 = powl(sqrtl(dx * dx + dy * dy), 3); aij_x = dx / r3; aij_y = dy / r3; *ax = aij_x * bodies[j].mass; *ay = aij_y * bodies[j].mass; } /** * Updates (x,y) and (vx,vy) of body b from its * current position, velocity and acceleration. */ static void update(body* b, double deltaT, long double ax, long double ay) { long double dvx, dvy; dvx = ax * (G * deltaT); dvy = ay * (G * deltaT); b->x += (b->vx + dvx / 2) * deltaT; b->y += (b->vy + dvy / 2) * deltaT; b->vx += dvx; b->vy += dvy; } /** * Determines length and offset in a problem buffer of size <size> * for process <rank> where the total number of processes is <np>. */ static void portionize(int size, int np, int rank, int* length, int* offset) { int portion = size / np; int residue = size % np; if (rank < residue) { *length = portion + 1; *offset = (*length) * rank; } else { *length = portion; *offset = (*length) * rank + residue; } if (*length == 0) { *offset = 0; } } /** * Determines x and y accelerations exercised by body j on body i. */ void acc_foo(body* bodies, int i, int j, long double* accX, long double* accY) { long double ax, ay; compute(bodies, i, j, &ax, &ay); accX[i] += ax; accY[i] += ay; } /** * Determines x and y accelerations exercised by body j on body i and * uses those to symmetrically set accelerations of body i on body j. 
 */
void acc_bar(body* bodies, int i, int j, long double* accX, long double* accY) {
    long double ax, ay;
    compute(bodies, i, j, &ax, &ay);
    accX[i] += ax;
    accY[i] += ay;

    /* Newton's third law: a_j = -(m_i / m_j) * a_i, because compute()
       already scaled the acceleration of i by m_j. */
    long double ratio = bodies[i].mass / bodies[j].mass;
    accX[j] -= ratio * ax;
    accY[j] -= ratio * ay;
}

/**
 * Performs the nbody simulation in a distributed fashion using MPI and OpenMP.
 *
 * Each process owns a contiguous slice of the bodies array (see portionize);
 * after every step the positions are synchronized with Allgatherv and the
 * final full state is gathered on rank 0.
 */
void compute_parallel(struct TaskInput* TI) {
    int np, self;
    MPI_Comm_size(MPI_COMM_WORLD, &np);
    MPI_Comm_rank(MPI_COMM_WORLD, &self);

    const bool debug = TI->debug;
    const long double deltaT = TI->deltaT;
    const int nSteps = TI->nSteps;
    const int imageStep = TI->imageStep;
    const int nBodies = TI->nBodies;
    body* bodies = TI->bodies;

    /* body_type: one body = 5 long doubles.
       pos_type: a 2-long-double block at displacement 1 within a body,
       resized to the full struct extent so collectives stride over whole
       bodies while transferring only the position fields.
       NOTE(review): the displacement {1} implies the position fields are not
       the first member of struct body -- confirm against ppp.h; also note the
       intermediate (pre-resize) datatype handle is overwritten and never
       freed. */
    MPI_Datatype body_type, pos_type;
    MPI_Type_contiguous(5, MPI_LONG_DOUBLE, &body_type);
    MPI_Type_commit(&body_type);
    MPI_Type_create_indexed_block(1, 2, (int[]){1}, MPI_LONG_DOUBLE, &pos_type);
    MPI_Type_create_resized(pos_type, 0, sizeof(body), &pos_type);
    MPI_Type_commit(&pos_type);

    /* Per-process slice lengths/offsets (VLAs sized by the process count). */
    int counts[np], displs[np];
    #pragma omp parallel for
    for (int p = 0; p < np; p++) {
        portionize(nBodies, np, p, &counts[p], &displs[p]);
    }
    int myoffset = displs[self];
    int mylength = counts[self];

    /* Per-step acceleration accumulators (stack VLAs of nBodies entries). */
    long double accX[nBodies], accY[nBodies];

    for (int step = 0; step < nSteps; step++) {
        // save an image snapshot every <imageStep> steps
        if (self == 0 && imageStep > 0 && step % imageStep == 0) {
            saveImage(step / imageStep, bodies, nBodies);
        }
        if (self == 0 && debug) {
            printf("%d\r", step);
        }

        // initialize this step's accelerations
        #pragma omp parallel for
        for (int k = 0; k < nBodies; k++) {
            accX[k] = accY[k] = 0;
        }

        if (TI->newton3) {
            // implementation with Newton's third law used globally
            /*
             * reduction on whole arrays turned out to be faster than
             * initiating parallelism every iteration and reducing individual
             * fields manually into temporary local variables.
             */
            /* Each process handles the lower half of its own slice plus the
               upper half of its mirror process' slice, pairing i>j so every
               (i,j) interaction is computed exactly once across all ranks. */
            #pragma omp parallel for reduction(+:accX,accY)
            for (int i = myoffset; i < myoffset + mylength / 2; i++) {
                for (int j = 0; j < i; j++) {
                    acc_bar(bodies, i, j, accX, accY);
                }
            }
            int p = (np - 1) - self; // mirror process
            #pragma omp parallel for reduction(+:accX,accY)
            for (int i = displs[p]+counts[p]/2; i < displs[p]+counts[p]; i++) {
                for (int j = 0; j < i; j++) {
                    acc_bar(bodies, i, j, accX, accY);
                }
            }
            // sum accelerations together over all processes
            MPI_Request requests[2];
            MPI_Iallreduce(MPI_IN_PLACE, &accX, nBodies, MPI_LONG_DOUBLE,
                           MPI_SUM, MPI_COMM_WORLD, &requests[0]);
            MPI_Iallreduce(MPI_IN_PLACE, &accY, nBodies, MPI_LONG_DOUBLE,
                           MPI_SUM, MPI_COMM_WORLD, &requests[1]);
            MPI_Waitall(2, requests, MPI_STATUSES_IGNORE);
        } else if (TI->newton3local) {
            // implementation with Newton's third law for local computations
            /* Symmetric updates only for pairs inside this process' slice;
               plain accumulation for interactions with remote bodies. */
            #pragma omp parallel for reduction(+:accX,accY)
            for (int i = myoffset; i < myoffset + mylength; i++) {
                for (int j = 0; j < nBodies; j++) {
                    if (j >= myoffset && j < myoffset + mylength) {
                        if (j < i) {
                            acc_bar(bodies, i, j, accX, accY);
                        }
                    } else {
                        acc_foo(bodies, i, j, accX, accY);
                    }
                }
            }
        } else {
            // straightforward implementation without calculation savings
            #pragma omp parallel for
            for (int i = myoffset; i < myoffset + mylength; i++) {
                for (int j = 0; j < nBodies; j++) {
                    if (i == j) {
                        continue;
                    }
                    acc_foo(bodies, i, j, accX, accY);
                }
            }
        }

        // update this process' associated bodies
        #pragma omp parallel for
        for (int i = myoffset; i < myoffset + mylength; i++) {
            update(&bodies[i], deltaT, accX[i], accY[i]);
        }

        // sync body positions over all processes
        MPI_Allgatherv(MPI_IN_PLACE, 0, pos_type,
                       bodies, counts, displs, pos_type, MPI_COMM_WORLD);
    }

    // save a final snapshot if <imageStep> divides <nSteps>
    if (self == 0 && imageStep > 0 && nSteps % imageStep == 0) {
        saveImage(nSteps / imageStep, bodies, nBodies);
    }
    if (debug) {
        printf("\n");
    }

    // collect final result on root
    if (self == 0) {
        MPI_Gatherv(MPI_IN_PLACE, 0, body_type,
                    bodies, counts, displs, body_type, 0,
                    MPI_COMM_WORLD);
    } else {
        MPI_Gatherv(bodies + myoffset, mylength, body_type,
                    bodies, counts, displs, body_type, 0,
                    MPI_COMM_WORLD);
    }

    // clean up custom types
    MPI_Type_free(&body_type);
    MPI_Type_free(&pos_type);
}
gemver.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "gemver.h" /* Array initialization. */ static void init_array (int n, DATA_TYPE *alpha, DATA_TYPE *beta, DATA_TYPE POLYBENCH_2D(A,N,N,n,n), DATA_TYPE POLYBENCH_1D(u1,N,n), DATA_TYPE POLYBENCH_1D(v1,N,n), DATA_TYPE POLYBENCH_1D(u2,N,n), DATA_TYPE POLYBENCH_1D(v2,N,n), DATA_TYPE POLYBENCH_1D(w,N,n), DATA_TYPE POLYBENCH_1D(x,N,n), DATA_TYPE POLYBENCH_1D(y,N,n), DATA_TYPE POLYBENCH_1D(z,N,n)) { int i, j; *alpha = 43532; *beta = 12313; for (i = 0; i < n; i++) { u1[i] = i; u2[i] = (i+1)/n/2.0; v1[i] = (i+1)/n/4.0; v2[i] = (i+1)/n/6.0; y[i] = (i+1)/n/8.0; z[i] = (i+1)/n/9.0; x[i] = 0.0; w[i] = 0.0; for (j = 0; j < n; j++) A[i][j] = ((DATA_TYPE) i*j) / n; } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int n, DATA_TYPE POLYBENCH_1D(w,N,n)) { int i; for (i = 0; i < n; i++) { fprintf (stderr, DATA_PRINTF_MODIFIER, w[i]); if (i % 20 == 0) fprintf (stderr, "\n"); } } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ static void kernel_gemver(int n, DATA_TYPE alpha, DATA_TYPE beta, DATA_TYPE POLYBENCH_2D(A,N,N,n,n), DATA_TYPE POLYBENCH_1D(u1,N,n), DATA_TYPE POLYBENCH_1D(v1,N,n), DATA_TYPE POLYBENCH_1D(u2,N,n), DATA_TYPE POLYBENCH_1D(v2,N,n), DATA_TYPE POLYBENCH_1D(w,N,n), DATA_TYPE POLYBENCH_1D(x,N,n), DATA_TYPE POLYBENCH_1D(y,N,n), DATA_TYPE POLYBENCH_1D(z,N,n)) { int i, j; #pragma scop #pragma omp parallel { #pragma omp for private (j) for (i = 0; i < _PB_N; i++) for (j = 0; j < _PB_N; j++) A[i][j] = A[i][j] + u1[i] * v1[j] + u2[i] * v2[j]; #pragma omp for private (j) for (i = 0; i < _PB_N; i++) for (j = 0; j < _PB_N; j++) x[i] = x[i] + beta * A[j][i] * y[j]; #pragma omp for for (i = 0; i < _PB_N; i++) x[i] = x[i] + z[i]; #pragma omp for private (j) for (i = 0; i < _PB_N; i++) for (j = 0; j < _PB_N; j++) w[i] = w[i] + alpha * A[i][j] * x[j]; } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; /* Variable declaration/allocation. */ DATA_TYPE alpha; DATA_TYPE beta; POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, N, N, n, n); POLYBENCH_1D_ARRAY_DECL(u1, DATA_TYPE, N, n); POLYBENCH_1D_ARRAY_DECL(v1, DATA_TYPE, N, n); POLYBENCH_1D_ARRAY_DECL(u2, DATA_TYPE, N, n); POLYBENCH_1D_ARRAY_DECL(v2, DATA_TYPE, N, n); POLYBENCH_1D_ARRAY_DECL(w, DATA_TYPE, N, n); POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, N, n); POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, N, n); POLYBENCH_1D_ARRAY_DECL(z, DATA_TYPE, N, n); /* Initialize array(s). */ init_array (n, &alpha, &beta, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(u1), POLYBENCH_ARRAY(v1), POLYBENCH_ARRAY(u2), POLYBENCH_ARRAY(v2), POLYBENCH_ARRAY(w), POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(y), POLYBENCH_ARRAY(z)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_gemver (n, alpha, beta, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(u1), POLYBENCH_ARRAY(v1), POLYBENCH_ARRAY(u2), POLYBENCH_ARRAY(v2), POLYBENCH_ARRAY(w), POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(y), POLYBENCH_ARRAY(z)); /* Stop and print timer. 
*/ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(w))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(u1); POLYBENCH_FREE_ARRAY(v1); POLYBENCH_FREE_ARRAY(u2); POLYBENCH_FREE_ARRAY(v2); POLYBENCH_FREE_ARRAY(w); POLYBENCH_FREE_ARRAY(x); POLYBENCH_FREE_ARRAY(y); POLYBENCH_FREE_ARRAY(z); return 0; }
core_zunmqr.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @precisions normal z -> c d s * **/ #include <plasma_core_blas.h> #include "plasma_types.h" #include "plasma_internal.h" #include "core_lapack.h" #include <omp.h> /***************************************************************************//** * * @ingroup core_unmqr * * Overwrites the general m-by-n tile C with * * side = PlasmaLeft side = PlasmaRight * trans = PlasmaNoTrans Q * C C * Q * trans = Plasma_ConjTrans Q^H * C C * Q^H * * where Q is a unitary matrix defined as the product of k * elementary reflectors * \f[ * Q = H(1) H(2) ... H(k) * \f] * as returned by plasma_core_zgeqrt. Q is of order m if side = PlasmaLeft * and of order n if side = PlasmaRight. * ******************************************************************************* * * @param[in] side * - PlasmaLeft : apply Q or Q^H from the Left; * - PlasmaRight : apply Q or Q^H from the Right. * * @param[in] trans * - PlasmaNoTrans : No transpose, apply Q; * - Plasma_ConjTrans : Transpose, apply Q^H. * * @param[in] m * The number of rows of the tile C. m >= 0. * * @param[in] n * The number of columns of the tile C. n >= 0. * * @param[in] k * The number of elementary reflectors whose product defines * the matrix Q. * If side = PlasmaLeft, m >= k >= 0; * if side = PlasmaRight, n >= k >= 0. * * @param[in] ib * The inner-blocking size. ib >= 0. * * @param[in] A * Dimension: (lda,k) * The i-th column must contain the vector which defines the * elementary reflector H(i), for i = 1,2,...,k, * as returned by plasma_core_zgeqrt in the first k columns of its * array argument A. * * @param[in] lda * The leading dimension of the array A. * If side = PlasmaLeft, lda >= max(1,m); * if side = PlasmaRight, lda >= max(1,n). * * @param[in] T * The ib-by-k triangular factor T of the block reflector. * T is upper triangular by block (economic storage); * The rest of the array is not referenced. 
 *
 * @param[in] ldt
 *          The leading dimension of the array T. ldt >= ib.
 *
 * @param[in,out] C
 *          On entry, the m-by-n tile C.
 *          On exit, C is overwritten by Q*C or Q^T*C or C*Q^T or C*Q.
 *
 * @param[in] ldc
 *          The leading dimension of the array C. ldc >= max(1,m).
 *
 * @param work
 *          Auxiliary workspace array of length
 *          ldwork-by-n  if side == PlasmaLeft
 *          ldwork-by-ib if side == PlasmaRight
 *
 * @param[in] ldwork
 *          The leading dimension of the array work.
 *          ldwork >= max(1,ib) if side == PlasmaLeft
 *          ldwork >= max(1,m)  if side == PlasmaRight
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 ******************************************************************************/
__attribute__((weak))
int plasma_core_zunmqr(plasma_enum_t side, plasma_enum_t trans,
                       int m, int n, int k, int ib,
                       const plasma_complex64_t *A,    int lda,
                       const plasma_complex64_t *T,    int ldt,
                             plasma_complex64_t *C,    int ldc,
                             plasma_complex64_t *work, int ldwork)
{
    // Check input arguments.
    // Each failed check returns the negated 1-based index of the offending
    // argument, matching the LAPACK error convention.
    if (side != PlasmaLeft && side != PlasmaRight) {
        plasma_coreblas_error("illegal value of side");
        return -1;
    }

    // nq/nw depend on side and are needed by the dimension checks below.
    int nq; // order of Q
    int nw; // dimension of work

    if (side == PlasmaLeft) {
        nq = m;
        nw = n;
    }
    else {
        nq = n;
        nw = m;
    }

    if (trans != PlasmaNoTrans && trans != Plasma_ConjTrans) {
        plasma_coreblas_error("illegal value of trans");
        return -2;
    }
    if (m < 0) {
        plasma_coreblas_error("illegal value of m");
        return -3;
    }
    if (n < 0) {
        plasma_coreblas_error("illegal value of n");
        return -4;
    }
    if (k < 0 || k > nq) {
        plasma_coreblas_error("illegal value of k");
        return -5;
    }
    if (ib < 0) {
        plasma_coreblas_error("illegal value of ib");
        return -6;
    }
    if (A == NULL) {
        plasma_coreblas_error("NULL A");
        return -7;
    }
    if (lda < imax(1, nq) && nq > 0) {
        plasma_coreblas_error("illegal value of lda");
        return -8;
    }
    if (T == NULL) {
        plasma_coreblas_error("NULL T");
        return -9;
    }
    if (ldt < imax(1, ib)) {
        plasma_coreblas_error("illegal value of ldt");
        return -10;
    }
    if (C == NULL) {
        plasma_coreblas_error("NULL C");
        return -11;
    }
    if (ldc < imax(1, m) && m > 0) {
        plasma_coreblas_error("illegal value of ldc");
        return -12;
    }
    if (work == NULL) {
        plasma_coreblas_error("NULL work");
        return -13;
    }
    if (ldwork < imax(1, nw) && nw > 0) {
        plasma_coreblas_error("illegal value of ldwork");
        return -14;
    }

    // quick return
    if (m == 0 || n == 0 || k == 0)
        return PlasmaSuccess;

    // Sweep direction over the k reflectors in steps of the inner block
    // size ib: forward from block 0, or backward from the last full block,
    // chosen so the H(i) blocks are applied in the correct order for the
    // requested side/trans combination.
    int i1, i3;

    if ((side == PlasmaLeft  && trans != PlasmaNoTrans) ||
        (side == PlasmaRight && trans == PlasmaNoTrans)) {
        i1 = 0;
        i3 = ib;
    }
    else {
        i1 = ((k-1)/ib)*ib;
        i3 = -ib;
    }

    for (int i = i1; i > -1 && i < k; i += i3) {
        int kb = imin(ib, k-i);   // number of reflectors in this block

        // Sub-tile of C the block reflector acts on.
        int ic = 0;
        int jc = 0;
        int ni = n;
        int mi = m;

        if (side == PlasmaLeft) {
            // H or H^H is applied to C(i:m,1:n).
            mi = m - i;
            ic = i;
        }
        else {
            // H or H^H is applied to C(1:m,i:n).
            ni = n - i;
            jc = i;
        }

        // Apply H or H^H.
        LAPACKE_zlarfb_work(LAPACK_COL_MAJOR,
                            lapack_const(side), lapack_const(trans),
                            lapack_const(PlasmaForward),
                            lapack_const(PlasmaColumnwise),
                            mi, ni, kb,
                            &A[lda*i+i], lda,
                            &T[ldt*i],   ldt,
                            &C[ldc*jc+ic], ldc,
                            work, ldwork);
    }

    return PlasmaSuccess;
}

/******************************************************************************/
// OpenMP task wrapper: schedules plasma_core_zunmqr as a task with the data
// dependences the PLASMA runtime needs (reads A and T, updates C).
void plasma_core_omp_zunmqr(plasma_enum_t side, plasma_enum_t trans,
                     int m, int n, int k, int ib,
                     const plasma_complex64_t *A, int lda,
                     const plasma_complex64_t *T, int ldt,
                           plasma_complex64_t *C, int ldc,
                     plasma_workspace_t work,
                     plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(in:A[0:lda*k]) \
                     depend(in:T[0:ib*k]) \
                     depend(inout:C[0:ldc*n])
    {
        // Skip the kernel if an earlier task in the sequence already failed.
        if (sequence->status == PlasmaSuccess) {
            // Prepare workspaces.
            // Each OpenMP thread has its own preallocated scratch space.
            int tid = omp_get_thread_num();
            plasma_complex64_t *W = (plasma_complex64_t*)work.spaces[tid];
            int ldwork = side == PlasmaLeft ? n : m; // TODO: double check

            // Call the kernel.
            int info = plasma_core_zunmqr(side, trans,
                                   m, n, k, ib,
                                   A, lda,
                                   T, ldt,
                                   C, ldc,
                                   W, ldwork);

            if (info != PlasmaSuccess) {
                plasma_error("core_zunmqr() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
Albus_spmv.h
#include<iostream> #include<stdio.h> #include<math.h> #include<time.h> #include<omp.h> #include<immintrin.h> #include<cstring> #include<sys/time.h> #include<stdlib.h> using namespace std; #define INT int #define DOU double #define AVX_DOU __m256d #define SSE_DOU __m128d inline DOU SIMD_fast1(INT start1,INT num,INT * __restrict row_ptr,INT * __restrict col_idx,DOU * __restrict mtx_val,DOU * __restrict vec_val) { DOU answer; switch(num) { case 4 : { register SSE_DOU mtx_3 , vec_3 , ans_3 , mtx_3_1 , vec_3_1; register INT s1,s2,s3; s1 = start1 + 1; s2 = start1 + 2; s3 = start1 + 3; mtx_3 = _mm_load_pd(mtx_val+start1); mtx_3_1 = _mm_load_pd(mtx_val+s2); vec_3 = _mm_set_pd(vec_val[col_idx[s1]],vec_val[col_idx[start1]]); vec_3_1 = _mm_set_pd(vec_val[col_idx[s3]],vec_val[col_idx[s2]]); ans_3 = _mm_fmadd_pd(mtx_3_1,vec_3_1,_mm_mul_pd(mtx_3,vec_3)); answer = ans_3[0]+ans_3[1]; return answer; } default : { register AVX_DOU mtx_ans_1,mtx_3,vec_3; register INT s1,s2,s3; register INT t = num & (~3); register INT start2 = start1 + t; register INT num_1 = num & 3; s1 = start1 + 1; s2 = start1 + 2; s3 = start1 + 3; _mm_prefetch((DOU *)&mtx_val[start1+16],_MM_HINT_T0); _mm_prefetch((DOU *)&col_idx[start1+16],_MM_HINT_T0); mtx_3 = _mm256_load_pd(mtx_val+start1); vec_3 = _mm256_set_pd(vec_val[col_idx[s3]],vec_val[col_idx[s2]],vec_val[col_idx[s1]],vec_val[col_idx[start1]]); mtx_ans_1 = _mm256_mul_pd(mtx_3,vec_3); start1 += 4; #pragma unroll(32) for(;start1<start2;start1+=4) { s1 = start1 + 1; s2 = start1 + 2; s3 = start1 + 3; mtx_3 = _mm256_load_pd(mtx_val+start1); vec_3 = _mm256_setr_pd(vec_val[col_idx[start1]],vec_val[col_idx[s1]],vec_val[col_idx[s2]],vec_val[col_idx[s3]]); mtx_ans_1 = _mm256_fmadd_pd(mtx_3,vec_3,mtx_ans_1); } switch (num_1) { case 0 : { mtx_ans_1 = _mm256_hadd_pd(mtx_ans_1,mtx_ans_1); answer = mtx_ans_1[0] + mtx_ans_1[2]; return answer; } case 1 : { mtx_ans_1 = _mm256_hadd_pd(mtx_ans_1,mtx_ans_1); answer = mtx_ans_1[0] + mtx_ans_1[2]; answer = answer + 
(mtx_val[start2]*vec_val[col_idx[start2]]); return answer; } case 2 : { mtx_ans_1 = _mm256_hadd_pd(mtx_ans_1,mtx_ans_1); answer = mtx_ans_1[0] + mtx_ans_1[2]; s1 = start2 + 1; answer = answer+(mtx_val[start2]*vec_val[col_idx[start2]]+mtx_val[s1]*vec_val[col_idx[s1]]); return answer; } default : { s1 = start2 + 1; s2 = start2 + 2; mtx_3 = _mm256_load_pd(mtx_val+start2); vec_3 = _mm256_set_pd(0,vec_val[col_idx[s2]],vec_val[col_idx[s1]],vec_val[col_idx[start2]]); mtx_ans_1 = _mm256_fmadd_pd(mtx_3,vec_3,mtx_ans_1); mtx_ans_1 = _mm256_hadd_pd(mtx_ans_1,mtx_ans_1); answer = mtx_ans_1[0] + mtx_ans_1[2]; return answer; } } } } } inline DOU SIMD_fast2(INT start1,INT num,INT * __restrict__ row_ptr,INT * __restrict__ col_idx,DOU * __restrict__ mtx_val,DOU * __restrict__ vec_val) { register DOU answer; switch(num) { case 0 : return 0; case 1 : { answer = mtx_val[start1] * vec_val[col_idx[start1]]; return answer; } case 2 : { register INT s1; s1 = start1 + 1; answer = mtx_val[start1] * vec_val[col_idx[start1]] + mtx_val[s1] * vec_val[col_idx[s1]]; return answer; } case 3 : { register SSE_DOU mtx_3 , vec_3 , ans_3 , mtx_3_1 , vec_3_1; register INT s1,s2; s1 = start1 + 1; s2 = start1 + 2; mtx_3 = _mm_load_pd(mtx_val+start1); mtx_3_1 = _mm_load_pd(mtx_val+s2); vec_3 = _mm_set_pd(vec_val[col_idx[s1]],vec_val[col_idx[start1]]); vec_3_1 = _mm_set_pd(0,vec_val[col_idx[s2]]); ans_3 = _mm_fmadd_pd(mtx_3_1,vec_3_1,_mm_mul_pd(mtx_3,vec_3)); answer = ans_3[0] + ans_3[1]; return answer; } } } inline DOU calculation(INT start1,INT num,INT * __restrict row_ptr,INT * __restrict col_idx,DOU * __restrict mtx_val,DOU * __restrict vec_val) { if(num>=4) { return SIMD_fast1(start1,num,row_ptr,col_idx,mtx_val,vec_val); } else { return SIMD_fast2(start1,num,row_ptr,col_idx,mtx_val,vec_val); } } inline void thread_block(INT thread_id,INT start,INT end,INT start2,INT end2,INT * __restrict row_ptr,INT * __restrict col_idx,DOU * __restrict mtx_val,DOU * __restrict mtx_ans,DOU * __restrict mid_ans,DOU * 
__restrict vec_val)
{
    /* One ALBUS worker: computes rows [start, end] of the CSR SpMV, where the
       first and last rows may be SPLIT with the neighbouring workers.
       start2/end2 are nonzero offsets inside those split boundary rows; the
       two boundary partial sums go to mid_ans[2*thread_id] and
       mid_ans[2*thread_id+1] and are merged sequentially in SPMV_DOU(). */
    register INT start1,end1,num,Thread,i;
    register DOU sum;

    switch(start < end)
    {
        case true:   /* worker spans at least two rows */
        {
            /* Boundary rows are accumulated later, so zero them here. */
            mtx_ans[start] = 0.0;
            mtx_ans[end] = 0.0;

            /* Partial sum of the (possibly split) first row, from offset start2. */
            start1 = row_ptr[start] + start2;
            start++;
            end1 = row_ptr[start];
            num = end1 - start1;
            Thread = thread_id<<1;
            mid_ans[Thread] = calculation(start1,num,row_ptr,col_idx,mtx_val,vec_val);
            start1 = end1;

            /* Fully-owned interior rows: write results directly. */
            #pragma simd
            for(i=start;i<end;++i)
            {
                end1 = row_ptr[i+1];
                num = end1 - start1;
                sum = calculation(start1,num,row_ptr,col_idx,mtx_val,vec_val);
                mtx_ans[i] = sum;
                start1 = end1;
            }

            /* Partial sum of the split last row: first end2 nonzeros. */
            start1 = row_ptr[end];
            end1 = start1 + end2;
            mid_ans[Thread | 1] = calculation(start1,end2,row_ptr,col_idx,mtx_val,vec_val);
            return ;
        }
        default :    /* whole assignment lies inside a single row */
        {
            mtx_ans[start] = 0.0;
            Thread = thread_id<<1;
            start1 = row_ptr[start] + start2;
            num = end2 - start2;
            mid_ans[Thread] = calculation(start1,num,row_ptr,col_idx,mtx_val,vec_val);
            mid_ans[Thread | 1] = 0.0;
            return ;
        }
    }
}

/* Largest row index whose row_ptr value is still < num (0 if none), found by
   binary search over row_ptr[0..end]. Used to map a nonzero offset back to
   its row. */
inline INT binary_search(INT *&row_ptr,INT num,INT end)
{
    INT l,r,h,t=0;
    l=0,r=end;
    while(l<=r)
    {
        h = (l+r)>>1;
        if(row_ptr[h]>=num)
        {
            r=h-1;
        }
        else
        {
            l=h+1;
            t=h;
        }
    }
    return t;
}

/* ALBUS load balancing: split the nnz evenly across threads.
   par_set[0] is the row count and par_set[2] the nonzero count -- TODO
   confirm against the caller; each thread i starts at nonzero offset
   i * (nnz/thread_nums), expressed as a (row, offset-within-row) pair in
   start[]/start1[], with end[]/end1[] mirroring the next thread's start. */
inline void albus_balance(INT *&row_ptr,INT *&par_set,INT *&start,INT *&end,INT *&start1,INT *&end1,DOU *&mid_ans,INT thread_nums)
{
    register int tmp;
    start[0] = 0;
    start1[0] = 0;
    end[thread_nums-1] = par_set[0];
    end1[thread_nums-1] = 0;
    INT tt=par_set[2]/thread_nums;
    for(INT i=1;i<thread_nums;i++)
    {
        tmp=tt*i;
        start[i] = binary_search(row_ptr,tmp,par_set[0]);
        start1[i] = tmp - row_ptr[start[i]];
        end[i-1] = start[i];
        end1[i-1] = start1[i];
    }
}

/* Full SpMV: run one thread_block per thread in parallel, then merge the
   boundary partial sums sequentially.  When two adjacent workers split the
   same row (tmp1 == tmp2), both partial sums land in that row. */
inline void SPMV_DOU(INT * __restrict row_ptr,INT * __restrict col_idx,DOU * __restrict mtx_val,INT * __restrict par_set,DOU * __restrict mtx_ans,DOU * __restrict vec_val,INT * __restrict start,INT * __restrict end,INT * __restrict start1,INT * __restrict end1,DOU * __restrict mid_ans, INT thread_nums)
{
    register INT i;
    #pragma omp parallel private(i)
    {
        #pragma omp for schedule(static) nowait
        for(i=0;i<thread_nums;++i)
        {
            thread_block(i,start[i],end[i],start1[i],end1[i],row_ptr,col_idx,mtx_val,mtx_ans,mid_ans,vec_val);
        }
    }

    /* Sequential merge of boundary partial sums (cheap: 2 per thread). */
    mtx_ans[0] = mid_ans[0];
    INT sub;
    #pragma unroll(32)
    for(i=1;i<thread_nums;++i)
    {
        sub = i<<1;
        register INT tmp1 = start[i];
        register INT tmp2 = end[i-1];
        if(tmp1 == tmp2)
        {
            /* Both halves of a row split between threads i-1 and i. */
            mtx_ans[tmp1] += (mid_ans[sub-1] + mid_ans[sub]);
        }
        else
        {
            mtx_ans[tmp1] += mid_ans[sub];
            mtx_ans[tmp2] += mid_ans[sub-1];
        }
    }
}
c55c7aec73df0f31d67fbe39510946453b899e1d.c
#define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "omp.h"

/* Generic data-carrier for a multi-dimensional array: raw buffer plus its
 * size/padding/halo descriptors.  Only `data` and `size` are read below. */
struct dataobj
{
  void *restrict data;
  int * size;
  int * npsize;
  int * dsize;
  int * hsize;
  int * hofs;
  int * oofs;
} ;

/* Wall-clock accumulators for the three timed sections of Forward(). */
struct profiler
{
  double section0;
  double section1;
  double section2;
} ;

/* Machine-generated (Devito-style) forward time-stepping kernel, offloaded
 * with OpenMP `target` directives.  Per time step it runs three sections:
 *   section0 - high-order star-stencil update of the wavefield u (the six
 *              coefficient groups reach offsets +-6 grid points per axis),
 *              with a damp-weighted term; appears to be a damped acoustic
 *              wave equation -- TODO confirm against the generator.
 *   section1 - injects src[time][p] into u[t1] at the 8 cell corners
 *              surrounding each source coordinate (weights in px/py/pz look
 *              like trilinear interpolation; the 2.0e+1F factor suggests a
 *              grid spacing of 20 -- verify).
 *   section2 - samples u[t0] at each receiver coordinate with the matching
 *              8-corner weights and stores the result in rec[time][p].
 * t0/t1/t2 rotate over 3 time levels via modulo-3 index cycling.
 * Returns 0 unconditionally; elapsed times are accumulated into *timers. */
int Forward(struct dataobj *restrict damp_vec, const float dt, const float o_x, const float o_y, const float o_z, struct dataobj *restrict rec_vec, struct dataobj *restrict rec_coords_vec, struct dataobj *restrict src_vec, struct dataobj *restrict src_coords_vec, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int p_rec_M, const int p_rec_m, const int p_src_M, const int p_src_m, const int time_M, const int time_m, struct profiler * timers)
{
  /* Cast the flat buffers to variably-modified array pointers so the code
   * below can use natural multi-dimensional indexing. */
  float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]]) damp_vec->data;
  float (*restrict rec)[rec_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec_vec->size[1]]) rec_vec->data;
  float (*restrict rec_coords)[rec_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec_coords_vec->size[1]]) rec_coords_vec->data;
  float (*restrict src)[src_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_vec->size[1]]) src_vec->data;
  float (*restrict src_coords)[src_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_coords_vec->size[1]]) src_coords_vec->data;
  float (*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]]) u_vec->data;
  float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]]) vp_vec->data;

  /* Copy every array to the accelerator once, up front. */
  #pragma omp target enter data map(to: rec[0:rec_vec->size[0]][0:rec_vec->size[1]])
  #pragma omp target enter data map(to: u[0:u_vec->size[0]][0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])
  #pragma omp target enter data map(to: damp[0:damp_vec->size[0]][0:damp_vec->size[1]][0:damp_vec->size[2]])
  #pragma omp target enter data map(to: rec_coords[0:rec_coords_vec->size[0]][0:rec_coords_vec->size[1]])
  #pragma omp target enter data map(to: src[0:src_vec->size[0]][0:src_vec->size[1]])
  #pragma omp target enter data map(to: src_coords[0:src_coords_vec->size[0]][0:src_coords_vec->size[1]])
  #pragma omp target enter data map(to: vp[0:vp_vec->size[0]][0:vp_vec->size[1]][0:vp_vec->size[2]])

  /* Time loop with 3-level buffer rotation: t1 is written, t0 is the
   * current level, t2 the previous one. */
  for (int time = time_m, t0 = (time)%(3), t1 = (time + 1)%(3), t2 = (time + 2)%(3); time <= time_M; time += 1, t0 = (time)%(3), t1 = (time + 1)%(3), t2 = (time + 2)%(3))
  {
    struct timeval start_section0, end_section0;
    gettimeofday(&start_section0, NULL);
    /* Begin section0: stencil update of u[t1] over the whole grid.  The
     * constant +12 index shifts skip the halo layers. */
    #pragma omp target teams distribute parallel for collapse(3)
    for (int x = x_m; x <= x_M; x += 1)
    {
      for (int y = y_m; y <= y_M; y += 1)
      {
        for (int z = z_m; z <= z_M; z += 1)
        {
          float r0 = vp[x + 12][y + 12][z + 12]*vp[x + 12][y + 12][z + 12];  /* vp^2 */
          u[t1][x + 12][y + 12][z + 12] = 2.0F*(5.0e-1F*r0*(dt*dt)*(-1.50312647e-7F*(u[t0][x + 6][y + 12][z + 12] + u[t0][x + 12][y + 6][z + 12] + u[t0][x + 12][y + 12][z + 6] + u[t0][x + 12][y + 12][z + 18] + u[t0][x + 12][y + 18][z + 12] + u[t0][x + 18][y + 12][z + 12]) + 2.59740254e-6F*(u[t0][x + 7][y + 12][z + 12] + u[t0][x + 12][y + 7][z + 12] + u[t0][x + 12][y + 12][z + 7] + u[t0][x + 12][y + 12][z + 17] + u[t0][x + 12][y + 17][z + 12] + u[t0][x + 17][y + 12][z + 12]) - 2.23214281e-5F*(u[t0][x + 8][y + 12][z + 12] + u[t0][x + 12][y + 8][z + 12] + u[t0][x + 12][y + 12][z + 8] + u[t0][x + 12][y + 12][z + 16] + u[t0][x + 12][y + 16][z + 12] + u[t0][x + 16][y + 12][z + 12]) + 1.32275129e-4F*(u[t0][x + 9][y + 12][z + 12] + u[t0][x + 12][y + 9][z + 12] + u[t0][x + 12][y + 12][z + 9] + u[t0][x + 12][y + 12][z + 15] + u[t0][x + 12][y + 15][z + 12] + u[t0][x + 15][y + 12][z + 12]) - 6.69642842e-4F*(u[t0][x + 10][y + 12][z + 12] + u[t0][x + 12][y + 10][z + 12] + u[t0][x + 12][y + 12][z + 10] + u[t0][x + 12][y + 12][z + 14] + u[t0][x + 12][y + 14][z + 12] + u[t0][x + 14][y + 12][z + 12]) + 4.28571419e-3F*(u[t0][x + 11][y + 12][z + 12] + u[t0][x + 12][y + 11][z + 12] + u[t0][x + 12][y + 12][z + 11] + u[t0][x + 12][y + 12][z + 13] + u[t0][x + 12][y + 13][z + 12] + u[t0][x + 13][y + 12][z + 12]) - 2.23708328e-2F*u[t0][x + 12][y + 12][z + 12]) + 5.0e-1F*(r0*dt*damp[x + 1][y + 1][z + 1]*u[t0][x + 12][y + 12][z + 12] - u[t2][x + 12][y + 12][z + 12]) + 1.0F*u[t0][x + 12][y + 12][z + 12])/(r0*dt*damp[x + 1][y + 1][z + 1] + 1);
        }
      }
    }
    /* End section0 */
    gettimeofday(&end_section0, NULL);
    timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000;

    struct timeval start_section1, end_section1;
    gettimeofday(&start_section1, NULL);
    /* Begin section1: scatter each source term onto the 8 surrounding grid
     * nodes of u[t1].  Atomics guard against sources sharing cells. */
    #pragma omp target teams distribute parallel for collapse(1)
    for (int p_src = p_src_m; p_src <= p_src_M; p_src += 1)
    {
      /* ii_src_0..2 = lower cell corner; _3.._5 = upper corner (+1). */
      int ii_src_0 = (int)(floor(-5.0e-2*o_x + 5.0e-2*src_coords[p_src][0]));
      int ii_src_1 = (int)(floor(-5.0e-2*o_y + 5.0e-2*src_coords[p_src][1]));
      int ii_src_2 = (int)(floor(-5.0e-2*o_z + 5.0e-2*src_coords[p_src][2]));
      int ii_src_3 = (int)(floor(-5.0e-2*o_z + 5.0e-2*src_coords[p_src][2])) + 1;
      int ii_src_4 = (int)(floor(-5.0e-2*o_y + 5.0e-2*src_coords[p_src][1])) + 1;
      int ii_src_5 = (int)(floor(-5.0e-2*o_x + 5.0e-2*src_coords[p_src][0])) + 1;
      /* Fractional position of the source inside its cell. */
      float px = (float)(-o_x - 2.0e+1F*(int)(floor(-5.0e-2F*o_x + 5.0e-2F*src_coords[p_src][0])) + src_coords[p_src][0]);
      float py = (float)(-o_y - 2.0e+1F*(int)(floor(-5.0e-2F*o_y + 5.0e-2F*src_coords[p_src][1])) + src_coords[p_src][1]);
      float pz = (float)(-o_z - 2.0e+1F*(int)(floor(-5.0e-2F*o_z + 5.0e-2F*src_coords[p_src][2])) + src_coords[p_src][2]);
      /* One guarded injection per cell corner (8 in total). */
      if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1)
      {
        float r1 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12]*vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*py + 2.5e-3F*px*pz - 5.0e-2F*px + 2.5e-3F*py*pz - 5.0e-2F*py - 5.0e-2F*pz + 1)*src[time][p_src];
        #pragma omp atomic update
        u[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12] += r1;
      }
      if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1)
      {
        float r2 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12]*vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12])*(1.25e-4F*px*py*pz - 2.5e-3F*px*pz - 2.5e-3F*py*pz + 5.0e-2F*pz)*src[time][p_src];
        #pragma omp atomic update
        u[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12] += r2;
      }
      if (ii_src_0 >= x_m - 1 && ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1)
      {
        float r3 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12]*vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12])*(1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*py*pz + 5.0e-2F*py)*src[time][p_src];
        #pragma omp atomic update
        u[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12] += r3;
      }
      if (ii_src_0 >= x_m - 1 && ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1)
      {
        float r4 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12]*vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*py*pz)*src[time][p_src];
        #pragma omp atomic update
        u[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12] += r4;
      }
      if (ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1 && ii_src_5 <= x_M + 1)
      {
        float r5 = (dt*dt)*(vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12]*vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12])*(1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*px*pz + 5.0e-2F*px)*src[time][p_src];
        #pragma omp atomic update
        u[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12] += r5;
      }
      if (ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1 && ii_src_5 <= x_M + 1)
      {
        float r6 = (dt*dt)*(vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12]*vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*pz)*src[time][p_src];
        #pragma omp atomic update
        u[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12] += r6;
      }
      if (ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
      {
        float r7 = (dt*dt)*(vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12]*vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*py)*src[time][p_src];
        #pragma omp atomic update
        u[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12] += r7;
      }
      if (ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
      {
        float r8 = 1.25e-4F*px*py*pz*(dt*dt)*(vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12]*vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12])*src[time][p_src];
        #pragma omp atomic update
        u[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12] += r8;
      }
    }
    /* End section1 */
    gettimeofday(&end_section1, NULL);
    timers->section1 += (double)(end_section1.tv_sec-start_section1.tv_sec)+(double)(end_section1.tv_usec-start_section1.tv_usec)/1000000;

    struct timeval start_section2, end_section2;
    gettimeofday(&start_section2, NULL);
    /* Begin section2: gather u[t0] at receiver positions with the same
     * 8-corner weights used in section1 and record into rec[time]. */
    #pragma omp target teams distribute parallel for collapse(1)
    for (int p_rec = p_rec_m; p_rec <= p_rec_M; p_rec += 1)
    {
      int ii_rec_0 = (int)(floor(-5.0e-2*o_x + 5.0e-2*rec_coords[p_rec][0]));
      int ii_rec_1 = (int)(floor(-5.0e-2*o_y + 5.0e-2*rec_coords[p_rec][1]));
      int ii_rec_2 = (int)(floor(-5.0e-2*o_z + 5.0e-2*rec_coords[p_rec][2]));
      int ii_rec_3 = (int)(floor(-5.0e-2*o_z + 5.0e-2*rec_coords[p_rec][2])) + 1;
      int ii_rec_4 = (int)(floor(-5.0e-2*o_y + 5.0e-2*rec_coords[p_rec][1])) + 1;
      int ii_rec_5 = (int)(floor(-5.0e-2*o_x + 5.0e-2*rec_coords[p_rec][0])) + 1;
      float px = (float)(-o_x - 2.0e+1F*(int)(floor(-5.0e-2F*o_x + 5.0e-2F*rec_coords[p_rec][0])) + rec_coords[p_rec][0]);
      float py = (float)(-o_y - 2.0e+1F*(int)(floor(-5.0e-2F*o_y + 5.0e-2F*rec_coords[p_rec][1])) + rec_coords[p_rec][1]);
      float pz = (float)(-o_z - 2.0e+1F*(int)(floor(-5.0e-2F*o_z + 5.0e-2F*rec_coords[p_rec][2])) + rec_coords[p_rec][2]);
      float sum = 0.0F;
      if (ii_rec_0 >= x_m - 1 && ii_rec_1 >= y_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_1 <= y_M + 1 && ii_rec_2 <= z_M + 1)
      {
        sum += (-1.25e-4F*px*py*pz + 2.5e-3F*px*py + 2.5e-3F*px*pz - 5.0e-2F*px + 2.5e-3F*py*pz - 5.0e-2F*py - 5.0e-2F*pz + 1)*u[t0][ii_rec_0 + 12][ii_rec_1 + 12][ii_rec_2 + 12];
      }
      if (ii_rec_0 >= x_m - 1 && ii_rec_1 >= y_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_1 <= y_M + 1 && ii_rec_3 <= z_M + 1)
      {
        sum += (1.25e-4F*px*py*pz - 2.5e-3F*px*pz - 2.5e-3F*py*pz + 5.0e-2F*pz)*u[t0][ii_rec_0 + 12][ii_rec_1 + 12][ii_rec_3 + 12];
      }
      if (ii_rec_0 >= x_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_2 <= z_M + 1 && ii_rec_4 <= y_M + 1)
      {
        sum += (1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*py*pz + 5.0e-2F*py)*u[t0][ii_rec_0 + 12][ii_rec_4 + 12][ii_rec_2 + 12];
      }
      if (ii_rec_0 >= x_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_3 <= z_M + 1 && ii_rec_4 <= y_M + 1)
      {
        sum += (-1.25e-4F*px*py*pz + 2.5e-3F*py*pz)*u[t0][ii_rec_0 + 12][ii_rec_4 + 12][ii_rec_3 + 12];
      }
      if (ii_rec_1 >= y_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_1 <= y_M + 1 && ii_rec_2 <= z_M + 1 && ii_rec_5 <= x_M + 1)
      {
        sum += (1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*px*pz + 5.0e-2F*px)*u[t0][ii_rec_5 + 12][ii_rec_1 + 12][ii_rec_2 + 12];
      }
      if (ii_rec_1 >= y_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_1 <= y_M + 1 && ii_rec_3 <= z_M + 1 && ii_rec_5 <= x_M + 1)
      {
        sum += (-1.25e-4F*px*py*pz + 2.5e-3F*px*pz)*u[t0][ii_rec_5 + 12][ii_rec_1 + 12][ii_rec_3 + 12];
      }
      if (ii_rec_2 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_2 <= z_M + 1 && ii_rec_4 <= y_M + 1 && ii_rec_5 <= x_M + 1)
      {
        sum += (-1.25e-4F*px*py*pz + 2.5e-3F*px*py)*u[t0][ii_rec_5 + 12][ii_rec_4 + 12][ii_rec_2 + 12];
      }
      if (ii_rec_3 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_3 <= z_M + 1 && ii_rec_4 <= y_M + 1 && ii_rec_5 <= x_M + 1)
      {
        sum += 1.25e-4F*px*py*pz*u[t0][ii_rec_5 + 12][ii_rec_4 + 12][ii_rec_3 + 12];
      }
      rec[time][p_rec] = sum;
    }
    /* End section2 */
    gettimeofday(&end_section2, NULL);
    timers->section2 += (double)(end_section2.tv_sec-start_section2.tv_sec)+(double)(end_section2.tv_usec-start_section2.tv_usec)/1000000;
  }

  /* Copy results back to the host; only rec and u are updated, the
   * read-only inputs are simply released/deleted on the device. */
  #pragma omp target update from(rec[0:rec_vec->size[0]][0:rec_vec->size[1]])
  #pragma omp target exit data map(release: rec[0:rec_vec->size[0]][0:rec_vec->size[1]])
  #pragma omp target update from(u[0:u_vec->size[0]][0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])
  #pragma omp target exit data map(release: u[0:u_vec->size[0]][0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])
  #pragma omp target exit data map(delete: damp[0:damp_vec->size[0]][0:damp_vec->size[1]][0:damp_vec->size[2]])
  #pragma omp target exit data map(delete: rec_coords[0:rec_coords_vec->size[0]][0:rec_coords_vec->size[1]])
  #pragma omp target exit data map(delete: src[0:src_vec->size[0]][0:src_vec->size[1]])
  #pragma omp target exit data map(delete: src_coords[0:src_coords_vec->size[0]][0:src_coords_vec->size[1]])
  #pragma omp target exit data map(delete: vp[0:vp_vec->size[0]][0:vp_vec->size[1]][0:vp_vec->size[2]])
  return 0;
}
/* ==== file: convdw5x5s1_neon.h ==== */
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #include "option.h" #include "mat.h" namespace ncnn{ static void convdw5x5s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g=0; g<group; g++) { Mat out = top_blob.channel(g); const float bias0 = bias ? 
bias[g] : 0.f; const float* kernel0 = kernel + g*25; float* outptr = out; float* outptr2 = outptr + outw; const float* img0 = bottom_blob.channel(g); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; const float* r3 = img0 + w*3; const float* r4 = img0 + w*4; const float* r5 = img0 + w*5; const float* k0 = kernel0; const float* k1 = kernel0 + 5; const float* k2 = kernel0 + 10; const float* k3 = kernel0 + 15; const float* k4 = kernel0 + 20; #if __ARM_NEON float32x4_t _k0123 = vld1q_f32(kernel0); float32x4_t _k4567 = vld1q_f32(kernel0+4); float32x4_t _k891011 = vld1q_f32(kernel0+8); float32x4_t _k12131415 = vld1q_f32(kernel0+12); float32x4_t _k16171819 = vld1q_f32(kernel0+16); float32x4_t _k20212223 = vld1q_f32(kernel0+20); float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]); float32x4_t _bias0 = vdupq_n_f32(bias0); #endif // __ARM_NEON int i = 0; for (; i+1 < outh; i+=2) { #if __ARM_NEON #if __aarch64__ int nn = outw >> 3; int remain = outw & 7; #else int nn = outw >> 2; int remain = outw & 3; #endif // __aarch64__ #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( // r1 "prfm pldl1keep, [%4, #384] \n" "ld1 {v16.4s, v17.4s, v18.4s}, [%4] \n"// v16 v17 v18 = r10 r14 r18 "mov v8.16b, %25.16b \n"// v8 = _bias0 "mov v9.16b, %25.16b \n"// v9 = _bias0 "0: \n" "mov v10.16b, %25.16b \n"// v10 = _bias0 "mov v11.16b, %25.16b \n"// v11 = _bias0 "fmla v8.4s, v16.4s, %19.s[1] \n" "fmla v10.4s, v16.4s, %18.s[0] \n" "ext v19.16b, v16.16b, v17.16b, #4 \n"// r11 "fmla v9.4s, v17.4s, %19.s[1] \n" "fmla v11.4s, v17.4s, %18.s[0] \n" "ext v20.16b, v17.16b, v18.16b, #4 \n"// r15 "fmla v8.4s, v17.4s, %20.s[1] \n" "fmla v10.4s, v17.4s, %19.s[0] \n" "ext v21.16b, v16.16b, v17.16b, #8 \n"// r12 "fmla v9.4s, v18.4s, %20.s[1] \n" "fmla v11.4s, v18.4s, %19.s[0] \n" "ext v22.16b, v17.16b, v18.16b, #8 \n"// r16 "fmla v8.4s, v19.4s, %19.s[2] \n" "fmla v10.4s, v19.4s, %18.s[1] \n" "ext v19.16b, v16.16b, v17.16b, #12 
\n"// r13 "fmla v9.4s, v20.4s, %19.s[2] \n" "fmla v11.4s, v20.4s, %18.s[1] \n" "ext v20.16b, v17.16b, v18.16b, #12 \n"// r17 "fmla v8.4s, v21.4s, %19.s[3] \n" "fmla v10.4s, v21.4s, %18.s[2] \n" "add %4, %4, #32 \n" "fmla v9.4s, v22.4s, %19.s[3] \n" "fmla v11.4s, v22.4s, %18.s[2] \n" // r2 "prfm pldl1keep, [%5, #384] \n" "ld1 {v12.4s, v13.4s, v14.4s}, [%5] \n"// v12 v13 v14 = r20 r24 r28 "fmla v8.4s, v19.4s, %20.s[0] \n" "fmla v10.4s, v19.4s, %18.s[3] \n" "fmla v9.4s, v20.4s, %20.s[0] \n" "fmla v11.4s, v20.4s, %18.s[3] \n" "add %5, %5, #32 \n" "fmla v8.4s, v12.4s, %20.s[2] \n" "fmla v10.4s, v12.4s, %19.s[1] \n" "ext v21.16b, v12.16b, v13.16b, #4 \n"// r21 "fmla v9.4s, v13.4s, %20.s[2] \n" "fmla v11.4s, v13.4s, %19.s[1] \n" "ext v22.16b, v13.16b, v14.16b, #4 \n"// r25 "fmla v8.4s, v13.4s, %21.s[2] \n" "fmla v10.4s, v13.4s, %20.s[1] \n" "ext v19.16b, v12.16b, v13.16b, #8 \n"// r22 "fmla v9.4s, v14.4s, %21.s[2] \n" "fmla v11.4s, v14.4s, %20.s[1] \n" "ext v20.16b, v13.16b, v14.16b, #8 \n"// r26 "fmla v8.4s, v21.4s, %20.s[3] \n" "fmla v10.4s, v21.4s, %19.s[2] \n" "ext v21.16b, v12.16b, v13.16b, #12 \n"// r23 "fmla v9.4s, v22.4s, %20.s[3] \n" "fmla v11.4s, v22.4s, %19.s[2] \n" "ext v22.16b, v13.16b, v14.16b, #12 \n"// r27 "fmla v8.4s, v19.4s, %21.s[0] \n" "fmla v10.4s, v19.4s, %19.s[3] \n" "fmla v9.4s, v20.4s, %21.s[0] \n" "fmla v11.4s, v20.4s, %19.s[3] \n" // r3 "prfm pldl1keep, [%6, #384] \n" "ld1 {v16.4s, v17.4s, v18.4s}, [%6] \n"// v16 v17 v18 = r30 r34 r38 "fmla v8.4s, v21.4s, %21.s[1] \n" "fmla v10.4s, v21.4s, %20.s[0] \n" "fmla v9.4s, v22.4s, %21.s[1] \n" "fmla v11.4s, v22.4s, %20.s[0] \n" "add %6, %6, #32 \n" "fmla v8.4s, v16.4s, %21.s[3] \n" "fmla v10.4s, v16.4s, %20.s[2] \n" "ext v19.16b, v16.16b, v17.16b, #4 \n"// r31 "fmla v9.4s, v17.4s, %21.s[3] \n" "fmla v11.4s, v17.4s, %20.s[2] \n" "ext v20.16b, v17.16b, v18.16b, #4 \n"// r35 "fmla v8.4s, v17.4s, %22.s[3] \n" "fmla v10.4s, v17.4s, %21.s[2] \n" "ext v21.16b, v16.16b, v17.16b, #8 \n"// r32 "fmla v9.4s, 
v18.4s, %22.s[3] \n" "fmla v11.4s, v18.4s, %21.s[2] \n" "ext v22.16b, v17.16b, v18.16b, #8 \n"// r36 "fmla v8.4s, v19.4s, %22.s[0] \n" "fmla v10.4s, v19.4s, %20.s[3] \n" "ext v19.16b, v16.16b, v17.16b, #12 \n"// r33 "fmla v9.4s, v20.4s, %22.s[0] \n" "fmla v11.4s, v20.4s, %20.s[3] \n" "ext v20.16b, v17.16b, v18.16b, #12 \n"// r37 "fmla v8.4s, v21.4s, %22.s[1] \n" "fmla v10.4s, v21.4s, %21.s[0] \n" "fmla v9.4s, v22.4s, %22.s[1] \n" "fmla v11.4s, v22.4s, %21.s[0] \n" // r4 "prfm pldl1keep, [%7, #384] \n" "ld1 {v12.4s, v13.4s, v14.4s}, [%7] \n"// v12 v13 v14 = r40 r44 r48 "fmla v8.4s, v19.4s, %22.s[2] \n" "fmla v10.4s, v19.4s, %21.s[1] \n" "add %7, %7, #32 \n" "fmla v9.4s, v20.4s, %22.s[2] \n" "fmla v11.4s, v20.4s, %21.s[1] \n" "ext v21.16b, v12.16b, v13.16b, #4 \n"// r41 "fmla v8.4s, v12.4s, %23.s[0] \n" "fmla v10.4s, v12.4s, %21.s[3] \n" "ext v22.16b, v13.16b, v14.16b, #4 \n"// r45 "fmla v9.4s, v13.4s, %23.s[0] \n" "fmla v11.4s, v13.4s, %21.s[3] \n" "ext v19.16b, v12.16b, v13.16b, #8 \n"// r42 "fmla v8.4s, v13.4s, %24.s[0] \n" "fmla v10.4s, v13.4s, %22.s[3] \n" "ext v20.16b, v13.16b, v14.16b, #8 \n"// r46 "fmla v9.4s, v14.4s, %24.s[0] \n" "fmla v11.4s, v14.4s, %22.s[3] \n" // r0 and r5 "prfm pldl1keep, [%3, #384] \n" "ld1 {v16.4s, v17.4s, v18.4s}, [%3] \n"// v16 v17 v18 = r00 r04 r08 "fmla v8.4s, v21.4s, %23.s[1] \n" "fmla v10.4s, v21.4s, %22.s[0] \n" "ext v21.16b, v12.16b, v13.16b, #12 \n"// r43 "fmla v9.4s, v22.4s, %23.s[1] \n" "fmla v11.4s, v22.4s, %22.s[0] \n" "ext v22.16b, v13.16b, v14.16b, #12 \n"// r47 "fmla v8.4s, v19.4s, %23.s[2] \n" "fmla v10.4s, v19.4s, %22.s[1] \n" "prfm pldl1keep, [%8, #384] \n" "ld1 {v12.4s, v13.4s, v14.4s}, [%8] \n"// v12 v13 v14 = r50 r54 r58 "fmla v9.4s, v20.4s, %23.s[2] \n" "fmla v11.4s, v20.4s, %22.s[1] \n" "ext v19.16b, v16.16b, v17.16b, #4 \n"// r01 "fmla v8.4s, v21.4s, %23.s[3] \n" "fmla v10.4s, v21.4s, %22.s[2] \n" "ext v23.16b, v12.16b, v13.16b, #4 \n"// r51 "fmla v9.4s, v22.4s, %23.s[3] \n" "fmla v11.4s, v22.4s, %22.s[2] \n" 
"ext v20.16b, v17.16b, v18.16b, #4 \n"// r05 "fmla v8.4s, v16.4s, %18.s[0] \n" "fmla v10.4s, v12.4s, %23.s[0] \n" "ext v24.16b, v13.16b, v14.16b, #4 \n"// r55 "fmla v9.4s, v17.4s, %18.s[0] \n" "fmla v11.4s, v13.4s, %23.s[0] \n" "ext v21.16b, v16.16b, v17.16b, #8 \n"// r02 "fmla v8.4s, v17.4s, %19.s[0] \n" "fmla v10.4s, v13.4s, %24.s[0] \n" "ext v25.16b, v12.16b, v13.16b, #8 \n"// r52 "fmla v9.4s, v18.4s, %19.s[0] \n" "fmla v11.4s, v14.4s, %24.s[0] \n" "ext v22.16b, v17.16b, v18.16b, #8 \n"// r06 "fmla v8.4s, v19.4s, %18.s[1] \n" "fmla v10.4s, v23.4s, %23.s[1] \n" "ext v26.16b, v13.16b, v14.16b, #8 \n"// r56 "fmla v9.4s, v20.4s, %18.s[1] \n" "fmla v11.4s, v24.4s, %23.s[1] \n" "ext v19.16b, v16.16b, v17.16b, #12 \n"// r03 "fmla v8.4s, v21.4s, %18.s[2] \n" "fmla v10.4s, v25.4s, %23.s[2] \n" "ext v23.16b, v12.16b, v13.16b, #12 \n"// r53 "fmla v9.4s, v22.4s, %18.s[2] \n" "fmla v11.4s, v26.4s, %23.s[2] \n" "ext v20.16b, v17.16b, v18.16b, #12 \n"// r07 "fmla v8.4s, v19.4s, %18.s[3] \n" "fmla v10.4s, v23.4s, %23.s[3] \n" "ext v24.16b, v13.16b, v14.16b, #12 \n"// r57 "fmla v9.4s, v20.4s, %18.s[3] \n" "add %3, %3, #32 \n" "fmla v11.4s, v24.4s, %23.s[3] \n" "add %8, %8, #32 \n" // r1 "prfm pldl1keep, [%4, #384] \n" "ld1 {v16.4s, v17.4s, v18.4s}, [%4] \n"// v16 v17 v18 = r10 r14 r18 "subs %w0, %w0, #1 \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "mov v8.16b, %25.16b \n"// v8 = _bias0 "mov v9.16b, %25.16b \n"// v9 = _bias0 "st1 {v10.4s, v11.4s}, [%2], #32 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(outptr2), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2), // %5 "=r"(r3), // %6 "=r"(r4), // %7 "=r"(r5) // %8 : "0"(nn), "1"(outptr), "2"(outptr2), "3"(r0), "4"(r1), "5"(r2), "6"(r3), "7"(r4), "8"(r5), "w"(_k0123), // %18 "w"(_k4567), // %19 "w"(_k891011), // %20 "w"(_k12131415), // %21 "w"(_k16171819), // %22 "w"(_k20212223), // %23 "w"(_k24242424), // %24 "w"(_bias0) // %25 : "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", 
"v21", "v22", "v23", "v24", "v25", "v26" ); } if (remain >= 4) { remain -= 4; asm volatile( // r1 "prfm pldl1keep, [%3, #256] \n" "ld1 {v12.4s, v13.4s}, [%3] \n"// v12 v13 = r10 r14 "mov v8.16b, %23.16b \n"// v8 = _bias0 "mov v9.16b, %23.16b \n"// v9 = _bias0 "fmul v10.4s, v12.4s, %17.s[1] \n" "fmul v11.4s, v12.4s, %16.s[0] \n" "ext v21.16b, v12.16b, v13.16b, #4 \n"// r11 "fmla v8.4s, v13.4s, %18.s[1] \n" "fmla v9.4s, v13.4s, %17.s[0] \n" "ext v22.16b, v12.16b, v13.16b, #8 \n"// r12 "fmla v10.4s, v21.4s, %17.s[2] \n" "fmla v11.4s, v21.4s, %16.s[1] \n" "ext v23.16b, v12.16b, v13.16b, #12 \n"// r13 "fmla v8.4s, v22.4s, %17.s[3] \n" "fmla v9.4s, v22.4s, %16.s[2] \n" // r2 "prfm pldl1keep, [%4, #256] \n" "ld1 {v16.4s, v17.4s}, [%4] \n"// v16 v17 = r20 r24 "fmla v10.4s, v23.4s, %18.s[0] \n" "fmla v11.4s, v23.4s, %16.s[3] \n" "add %4, %4, #16 \n" "fmla v8.4s, v16.4s, %18.s[2] \n" "fmla v9.4s, v16.4s, %17.s[1] \n" "ext v18.16b, v16.16b, v17.16b, #4 \n"// r21 "fmla v10.4s, v17.4s, %19.s[2] \n" "fmla v11.4s, v17.4s, %18.s[1] \n" "ext v19.16b, v16.16b, v17.16b, #8 \n"// r22 "fmla v8.4s, v18.4s, %18.s[3] \n" "fmla v9.4s, v18.4s, %17.s[2] \n" "ext v20.16b, v16.16b, v17.16b, #12 \n"// r23 "fmla v10.4s, v19.4s, %19.s[0] \n" "fmla v11.4s, v19.4s, %17.s[3] \n" // r3 "prfm pldl1keep, [%5, #256] \n" "ld1 {v12.4s, v13.4s}, [%5] \n"// v12 v13 = r30 r34 "fmla v8.4s, v20.4s, %19.s[1] \n" "fmla v9.4s, v20.4s, %18.s[0] \n" "add %5, %5, #16 \n" "fmla v10.4s, v12.4s, %19.s[3] \n" "fmla v11.4s, v12.4s, %18.s[2] \n" "ext v21.16b, v12.16b, v13.16b, #4 \n"// r31 "fmla v8.4s, v13.4s, %20.s[3] \n" "fmla v9.4s, v13.4s, %19.s[2] \n" "ext v22.16b, v12.16b, v13.16b, #8 \n"// r32 "fmla v10.4s, v21.4s, %20.s[0] \n" "fmla v11.4s, v21.4s, %18.s[3] \n" "ext v23.16b, v12.16b, v13.16b, #12 \n"// r33 "fmla v8.4s, v22.4s, %20.s[1] \n" "fmla v9.4s, v22.4s, %19.s[0] \n" // r4 "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4s, v17.4s}, [%6] \n"// v16 v17 = r40 r44 "fmla v10.4s, v23.4s, %20.s[2] \n" "fmla v11.4s, 
v23.4s, %19.s[1] \n" "add %6, %6, #16 \n" "fmla v8.4s, v16.4s, %21.s[0] \n" "fmla v9.4s, v16.4s, %19.s[3] \n" "ext v18.16b, v16.16b, v17.16b, #4 \n"// r41 "fmla v10.4s, v17.4s, %22.s[0] \n" "fmla v11.4s, v17.4s, %20.s[3] \n" "ext v19.16b, v16.16b, v17.16b, #8 \n"// r42 "fmla v8.4s, v18.4s, %21.s[1] \n" "fmla v9.4s, v18.4s, %20.s[0] \n" "ext v20.16b, v16.16b, v17.16b, #12 \n"// r43 "fmla v10.4s, v19.4s, %21.s[2] \n" "fmla v11.4s, v19.4s, %20.s[1] \n" // r0 "prfm pldl1keep, [%2, #256] \n" "ld1 {v16.4s, v17.4s}, [%2] \n"// v16 v17 = r00 r04 "fmla v8.4s, v20.4s, %21.s[3] \n" "fmla v9.4s, v20.4s, %20.s[2] \n" // r5 "prfm pldl1keep, [%7, #256] \n" "ld1 {v12.4s, v13.4s}, [%7] \n"// v12 v13 = r50 r54 "fmla v10.4s, v16.4s, %16.s[0] \n" "fmla v11.4s, v12.4s, %21.s[0] \n" "ext v18.16b, v16.16b, v17.16b, #4 \n"// r01 "fmla v8.4s, v17.4s, %17.s[0] \n" "ext v21.16b, v12.16b, v13.16b, #4 \n"// r51 "fmla v9.4s, v13.4s, %22.s[0] \n" "ext v19.16b, v16.16b, v17.16b, #8 \n"// r02 "fmla v10.4s, v18.4s, %16.s[1] \n" "ext v22.16b, v12.16b, v13.16b, #8 \n"// r52 "fmla v11.4s, v21.4s, %21.s[1] \n" "ext v20.16b, v16.16b, v17.16b, #12 \n"// r03 "fmla v8.4s, v19.4s, %16.s[2] \n" "ext v23.16b, v12.16b, v13.16b, #12 \n"// r53 "fmla v9.4s, v22.4s, %21.s[2] \n" "add %3, %3, #16 \n" "fmla v10.4s, v20.4s, %16.s[3] \n" "fmla v11.4s, v23.4s, %21.s[3] \n" "add %2, %2, #16 \n" "fadd v8.4s, v8.4s, v10.4s \n" "fadd v9.4s, v9.4s, v11.4s \n" "add %7, %7, #16 \n" "st1 {v8.4s}, [%0], #16 \n" "st1 {v9.4s}, [%1], #16 \n" : "=r"(outptr), // %0 "=r"(outptr2), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(r5) // %7 : "0"(outptr), "1"(outptr2), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(r5), "w"(_k0123), // %16 "w"(_k4567), // %17 "w"(_k891011), // %18 "w"(_k12131415), // %19 "w"(_k16171819), // %20 "w"(_k20212223), // %21 "w"(_k24242424), // %22 "w"(_bias0) // %23 : "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", 
"v22", "v23" ); } #else if (nn > 0) { asm volatile( // r1 "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4] \n"// q14 q15 = r10 r14 "vmov q8, %q25 \n"// q8 = _bias0 "0: \n" "vmov q9, %q25 \n"// q9 = _bias0 "vmla.f32 q8, q14, %e19[1] \n" "vmla.f32 q9, q14, %e18[0] \n" "vext.32 q12, q14, q15, #1 \n"// r11 "vmla.f32 q8, q15, %e20[1] \n" "vmla.f32 q9, q15, %e19[0] \n" "vext.32 q13, q14, q15, #2 \n"// r12 "vmla.f32 q8, q12, %f19[0] \n" "vmla.f32 q9, q12, %e18[1] \n" "vext.32 q12, q14, q15, #3 \n"// r13 "vmla.f32 q8, q13, %f19[1] \n" "vmla.f32 q9, q13, %f18[0] \n" // r2 "pld [%5, #256] \n" "vld1.f32 {d20-d23}, [%5] \n"// q10 q11 = r20 r24 "vmla.f32 q8, q12, %e20[0] \n" "vmla.f32 q9, q12, %f18[1] \n" "add %5, #16 \n" "vmla.f32 q8, q10, %f20[0] \n" "vmla.f32 q9, q10, %e19[1] \n" "vext.32 q12, q10, q11, #1 \n"// r21 "vmla.f32 q8, q11, %f21[0] \n" "vmla.f32 q9, q11, %e20[1] \n" "vext.32 q13, q10, q11, #2 \n"// r22 "vmla.f32 q8, q12, %f20[1] \n" "vmla.f32 q9, q12, %f19[0] \n" "vext.32 q12, q10, q11, #3 \n"// r23 "vmla.f32 q8, q13, %e21[0] \n" "vmla.f32 q9, q13, %f19[1] \n" // r3 "pld [%6, #256] \n" "vld1.f32 {d28-d31}, [%6] \n"// q14 q15 = r30 r34 "vmla.f32 q8, q12, %e21[1] \n" "vmla.f32 q9, q12, %e20[0] \n" "add %6, #16 \n" "vmla.f32 q8, q14, %f21[1] \n" "vmla.f32 q9, q14, %f20[0] \n" "vext.32 q12, q14, q15, #1 \n"// r31 "vmla.f32 q8, q15, %f22[1] \n" "vmla.f32 q9, q15, %f21[0] \n" "vext.32 q13, q14, q15, #2 \n"// r32 "vmla.f32 q8, q12, %e22[0] \n" "vmla.f32 q9, q12, %f20[1] \n" "vext.32 q12, q14, q15, #3 \n"// r33 "vmla.f32 q8, q13, %e22[1] \n" "vmla.f32 q9, q13, %e21[0] \n" // r4 "pld [%7, #256] \n" "vld1.f32 {d20-d23}, [%7] \n"// q10 q11 = r40 r44 "vmla.f32 q8, q12, %f22[0] \n" "vmla.f32 q9, q12, %e21[1] \n" "add %7, #16 \n" "vmla.f32 q8, q10, %e23[0] \n" "vmla.f32 q9, q10, %f21[1] \n" "vext.32 q12, q10, q11, #1 \n"// r41 "vmla.f32 q8, q11, %e24[0] \n" "vmla.f32 q9, q11, %f22[1] \n" "vext.32 q13, q10, q11, #2 \n"// r42 "vmla.f32 q8, q12, %e23[1] \n" "vmla.f32 q9, q12, %e22[0] 
\n" "vext.32 q12, q10, q11, #3 \n"// r43 "vmla.f32 q8, q13, %f23[0] \n" "vmla.f32 q9, q13, %e22[1] \n" // r0 and r5 "pld [%3, #256] \n" "vld1.f32 {d20-d23}, [%3] \n"// q10 q11 = r00 r04 "vmla.f32 q8, q12, %f23[1] \n" "vmla.f32 q9, q12, %f22[0] \n" // r5 "pld [%8, #256] \n" "vld1.f32 {d28-d31}, [%8] \n"// q14 q15 = r50 r54 "vmla.f32 q8, q10, %e18[0] \n" "vmla.f32 q9, q14, %e23[0] \n" "vext.32 q12, q10, q11, #1 \n"// r01 "vmla.f32 q8, q11, %e19[0] \n" "vmla.f32 q9, q15, %e24[0] \n" "vext.32 q13, q14, q15, #1 \n"// r51 "vmla.f32 q8, q12, %e18[1] \n" "vext.32 q12, q10, q11, #2 \n"// r02 "vmla.f32 q9, q13, %e23[1] \n" "vext.32 q13, q14, q15, #2 \n"// r52 "vmla.f32 q8, q12, %f18[0] \n" "vext.32 q12, q10, q11, #3 \n"// r03 "vmla.f32 q9, q13, %f23[0] \n" "vext.32 q13, q14, q15, #3 \n"// r33 "vmla.f32 q8, q12, %f18[1] \n" "add %3, #16 \n" "vmla.f32 q9, q13, %f23[1] \n" "add %4, #16 \n" // r1 "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4] \n"// q14 q15 = r10 r14 "add %8, #16 \n" "vst1.f32 {d16-d17}, [%1]! \n" "vmov q8, %q25 \n"// q8 = _bias0 "subs %0, #1 \n" "vst1.f32 {d18-d19}, [%2]! 
\n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(outptr2), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2), // %5 "=r"(r3), // %6 "=r"(r4), // %7 "=r"(r5) // %8 : "0"(nn), "1"(outptr), "2"(outptr2), "3"(r0), "4"(r1), "5"(r2), "6"(r3), "7"(r4), "8"(r5), "w"(_k0123), // %18 "w"(_k4567), // %19 "w"(_k891011), // %20 "w"(_k12131415), // %21 "w"(_k16171819), // %22 "w"(_k20212223), // %23 "w"(_k24242424), // %24 "w"(_bias0) // %25 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = bias0; float sum2 = bias0; #if __ARM_NEON // TODO neon assembly optimize float32x4_t _r1 = vld1q_f32(r1); float32x4_t _k1 = vld1q_f32(k1); float32x4_t _sum = vmulq_f32(_r1, _k1); float32x4_t _sum2 = vmulq_f32(_r1, _k0123); float32x4_t _r2 = vld1q_f32(r2); float32x4_t _k2 = vld1q_f32(k2); _sum = vmlaq_f32(_sum, _r2, _k2); _sum2 = vmlaq_f32(_sum2, _r2, _k1); float32x4_t _r3 = vld1q_f32(r3); float32x4_t _k3 = vld1q_f32(k3); _sum = vmlaq_f32(_sum, _r3, _k3); _sum2 = vmlaq_f32(_sum2, _r3, _k2); float32x4_t _r4 = vld1q_f32(r4); _sum = vmlaq_f32(_sum, _r4, _k20212223); _sum2 = vmlaq_f32(_sum2, _r4, _k3); float32x4_t _r0 = vld1q_f32(r0); _sum = vmlaq_f32(_sum, _r0, _k0123); float32x4_t _r5 = vld1q_f32(r5); _sum2 = vmlaq_f32(_sum2, _r5, _k20212223); float32x4_t _k_t4; _k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0); _k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1); _k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2); _k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3); float32x4_t _r_t4; _r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0); _r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1); _r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2); _r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3); _sum = vmlaq_f32(_sum, _r_t4, _k_t4); sum += r4[4] * k4[4]; _r_t4 = vextq_f32(_r_t4, _r_t4, 1); _r_t4 = vsetq_lane_f32(r4[4], _r_t4, 3); _sum2 = vmlaq_f32(_sum2, _r_t4, _k_t4); sum2 += r5[4] * k4[4]; float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); float32x2_t 
_ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2)); float32x2_t _ss_ss2 = vpadd_f32(_ss, _ss2); sum += vget_lane_f32(_ss_ss2, 0); sum2 += vget_lane_f32(_ss_ss2, 1); #else sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; sum2 += r1[0] * k0[0]; sum2 += r1[1] * k0[1]; sum2 += r1[2] * k0[2]; sum2 += r1[3] * k0[3]; sum2 += r1[4] * k0[4]; sum2 += r2[0] * k1[0]; sum2 += r2[1] * k1[1]; sum2 += r2[2] * k1[2]; sum2 += r2[3] * k1[3]; sum2 += r2[4] * k1[4]; sum2 += r3[0] * k2[0]; sum2 += r3[1] * k2[1]; sum2 += r3[2] * k2[2]; sum2 += r3[3] * k2[3]; sum2 += r3[4] * k2[4]; sum2 += r4[0] * k3[0]; sum2 += r4[1] * k3[1]; sum2 += r4[2] * k3[2]; sum2 += r4[3] * k3[3]; sum2 += r4[4] * k3[4]; sum2 += r5[0] * k4[0]; sum2 += r5[1] * k4[1]; sum2 += r5[2] * k4[2]; sum2 += r5[3] * k4[3]; sum2 += r5[4] * k4[4]; #endif // __ARM_NEON *outptr = sum; *outptr2 = sum2; r0++; r1++; r2++; r3++; r4++; r5++; outptr++; outptr2++; } r0 += 4 + w; r1 += 4 + w; r2 += 4 + w; r3 += 4 + w; r4 += 4 + w; r5 += 4 + w; outptr += outw; outptr2 += outw; } for (; i < outh; i++) { #if __ARM_NEON #if __aarch64__ int nn = outw >> 3; int remain = outw & 7; #else int nn = outw >> 2; int remain = outw & 3; #endif // __aarch64__ #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( // v10 v11 // r0 "prfm pldl1keep, [%2, #384] \n" "ld1 {v16.4s, v17.4s, v18.4s}, [%2] \n"// v16 v17 v18 = r00 r04 r08 "mov v8.16b, %21.16b \n"// v8 = _bias0 "mov v9.16b, %21.16b \n"// v9 = _bias0 
"0: \n" "fmul v10.4s, v16.4s, %14.s[0] \n" "ext v19.16b, v16.16b, v17.16b, #4 \n"// r01 "fmul v11.4s, v17.4s, %14.s[0] \n" "ext v20.16b, v17.16b, v18.16b, #4 \n"// r05 "fmla v8.4s, v17.4s, %15.s[0] \n" "ext v21.16b, v16.16b, v17.16b, #8 \n"// r02 "fmla v9.4s, v18.4s, %15.s[0] \n" "ext v22.16b, v17.16b, v18.16b, #8 \n"// r06 "fmla v10.4s, v19.4s, %14.s[1] \n" "ext v19.16b, v16.16b, v17.16b, #12 \n"// r03 "fmla v11.4s, v20.4s, %14.s[1] \n" "ext v20.16b, v17.16b, v18.16b, #12 \n"// r07 "fmla v8.4s, v21.4s, %14.s[2] \n" "fmla v9.4s, v22.4s, %14.s[2] \n" // r1 "prfm pldl1keep, [%3, #384] \n" "ld1 {v12.4s, v13.4s, v14.4s}, [%3] \n"// v12 v13 v14 = r10 r14 r18 "fmla v10.4s, v19.4s, %14.s[3] \n" "fmla v11.4s, v20.4s, %14.s[3] \n" "fmla v8.4s, v12.4s, %15.s[1] \n" "ext v19.16b, v12.16b, v13.16b, #4 \n"// r11 "fmla v9.4s, v13.4s, %15.s[1] \n" "ext v20.16b, v13.16b, v14.16b, #4 \n"// r15 "fmla v10.4s, v13.4s, %16.s[1] \n" "ext v21.16b, v12.16b, v13.16b, #8 \n"// r12 "fmla v11.4s, v14.4s, %16.s[1] \n" "ext v22.16b, v13.16b, v14.16b, #8 \n"// r16 "fmla v8.4s, v19.4s, %15.s[2] \n" "ext v19.16b, v12.16b, v13.16b, #12 \n"// r13 "fmla v9.4s, v20.4s, %15.s[2] \n" "ext v20.16b, v13.16b, v14.16b, #12 \n"// r17 "fmla v10.4s, v21.4s, %15.s[3] \n" "fmla v11.4s, v22.4s, %15.s[3] \n" // r2 "prfm pldl1keep, [%4, #384] \n" "ld1 {v16.4s, v17.4s, v18.4s}, [%4] \n"// v16 v17 v18 = r20 r24 r28 "fmla v8.4s, v19.4s, %16.s[0] \n" "fmla v9.4s, v20.4s, %16.s[0] \n" "fmla v10.4s, v16.4s, %16.s[2] \n" "ext v19.16b, v16.16b, v17.16b, #4 \n"// r21 "fmla v11.4s, v17.4s, %16.s[2] \n" "ext v20.16b, v17.16b, v18.16b, #4 \n"// r25 "fmla v8.4s, v17.4s, %17.s[2] \n" "ext v21.16b, v16.16b, v17.16b, #8 \n"// r22 "fmla v9.4s, v18.4s, %17.s[2] \n" "ext v22.16b, v17.16b, v18.16b, #8 \n"// r26 "fmla v10.4s, v19.4s, %16.s[3] \n" "ext v19.16b, v16.16b, v17.16b, #12 \n"// r23 "fmla v11.4s, v20.4s, %16.s[3] \n" "ext v20.16b, v17.16b, v18.16b, #12 \n"// r27 "fmla v8.4s, v21.4s, %17.s[0] \n" "fmla v9.4s, v22.4s, %17.s[0] 
\n" // r3 "prfm pldl1keep, [%5, #384] \n" "ld1 {v12.4s, v13.4s, v14.4s}, [%5] \n"// v12 v13 v14 = r30 r34 r38 "fmla v10.4s, v19.4s, %17.s[1] \n" "fmla v11.4s, v20.4s, %17.s[1] \n" "fmla v8.4s, v12.4s, %17.s[3] \n" "ext v19.16b, v12.16b, v13.16b, #4 \n"// r11 "fmla v9.4s, v13.4s, %17.s[3] \n" "ext v20.16b, v13.16b, v14.16b, #4 \n"// r15 "fmla v10.4s, v13.4s, %18.s[3] \n" "ext v21.16b, v12.16b, v13.16b, #8 \n"// r12 "fmla v11.4s, v14.4s, %18.s[3] \n" "ext v22.16b, v13.16b, v14.16b, #8 \n"// r16 "fmla v8.4s, v19.4s, %18.s[0] \n" "ext v19.16b, v12.16b, v13.16b, #12 \n"// r13 "fmla v9.4s, v20.4s, %18.s[0] \n" "ext v20.16b, v13.16b, v14.16b, #12 \n"// r17 "fmla v10.4s, v21.4s, %18.s[1] \n" "fmla v11.4s, v22.4s, %18.s[1] \n" // r4 "prfm pldl1keep, [%6, #384] \n" "ld1 {v16.4s, v17.4s, v18.4s}, [%6] \n"// v16 v17 v18 = r40 r44 r48 "fmla v8.4s, v19.4s, %18.s[2] \n" "fmla v9.4s, v20.4s, %18.s[2] \n" "fmla v10.4s, v16.4s, %19.s[0] \n" "ext v19.16b, v16.16b, v17.16b, #4 \n"// r41 "fmla v11.4s, v17.4s, %19.s[0] \n" "ext v20.16b, v17.16b, v18.16b, #4 \n"// r45 "fmla v8.4s, v17.4s, %20.s[0] \n" "ext v21.16b, v16.16b, v17.16b, #8 \n"// r42 "fmla v9.4s, v18.4s, %20.s[0] \n" "ext v22.16b, v17.16b, v18.16b, #8 \n"// r46 "fmla v10.4s, v19.4s, %19.s[1] \n" "ext v19.16b, v16.16b, v17.16b, #12 \n"// r43 "fmla v11.4s, v20.4s, %19.s[1] \n" "ext v20.16b, v17.16b, v18.16b, #12 \n"// r47 "fmla v8.4s, v21.4s, %19.s[2] \n" "add %2, %2, #32 \n" "fmla v9.4s, v22.4s, %19.s[2] \n" "add %3, %3, #32 \n" "fmla v10.4s, v19.4s, %19.s[3] \n" "add %4, %4, #32 \n" "fmla v11.4s, v20.4s, %19.s[3] \n" // r0 "prfm pldl1keep, [%2, #384] \n" "ld1 {v16.4s, v17.4s, v18.4s}, [%2] \n"// v16 v17 v18 = r00 r04 r08 "add %5, %5, #32 \n" "fadd v10.4s, v8.4s, v10.4s \n" "add %6, %6, #32 \n" "fadd v11.4s, v9.4s, v11.4s \n" "mov v8.16b, %21.16b \n"// v8 = _bias0 "mov v9.16b, %21.16b \n"// v9 = _bias0 "subs %w0, %w0, #1 \n" "st1 {v10.4s, v11.4s}, [%1], #32 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 
"=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4) // %6 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "w"(_k0123), // %14 "w"(_k4567), // %15 "w"(_k891011), // %16 "w"(_k12131415), // %17 "w"(_k16171819), // %18 "w"(_k20212223), // %19 "w"(_k24242424), // %20 "w"(_bias0) // %21 : "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22" ); } if (remain >= 4) { remain -= 4; asm volatile( // r0 "prfm pldl1keep, [%1, #256] \n" "ld1 {v16.4s, v17.4s}, [%1] \n"// v16 v17 = r00 r04 "mov v8.16b, %19.16b \n"// v8 = _bias0 "add %1, %1, #16 \n" "fmul v9.4s, v16.4s, %12.s[0] \n" "ext v18.16b, v16.16b, v17.16b, #4 \n"// r01 "fmla v8.4s, v17.4s, %13.s[0] \n" "ext v19.16b, v16.16b, v17.16b, #8 \n"// r02 "fmla v9.4s, v18.4s, %12.s[1] \n" "ext v20.16b, v16.16b, v17.16b, #12 \n"// r03 "fmla v8.4s, v19.4s, %12.s[2] \n" // r1 "prfm pldl1keep, [%2, #256] \n" "ld1 {v10.4s, v11.4s}, [%2] \n"// v10 v11 = r10 r14 "fmla v9.4s, v20.4s, %12.s[3] \n" "add %2, %2, #16 \n" "fmla v8.4s, v10.4s, %13.s[1] \n" "ext v12.16b, v10.16b, v11.16b, #4 \n"// r11 "fmla v9.4s, v11.4s, %14.s[1] \n" "ext v13.16b, v10.16b, v11.16b, #8 \n"// r12 "fmla v8.4s, v12.4s, %13.s[2] \n" "ext v14.16b, v10.16b, v11.16b, #12 \n"// r13 "fmla v9.4s, v13.4s, %13.s[3] \n" // r2 "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4s, v17.4s}, [%3] \n"// v16 v17 = r20 r24 "fmla v8.4s, v14.4s, %14.s[0] \n" "add %3, %3, #16 \n" "fmla v9.4s, v16.4s, %14.s[2] \n" "ext v18.16b, v16.16b, v17.16b, #4 \n"// r21 "fmla v8.4s, v17.4s, %15.s[2] \n" "ext v19.16b, v16.16b, v17.16b, #8 \n"// r22 "fmla v9.4s, v18.4s, %14.s[3] \n" "ext v20.16b, v16.16b, v17.16b, #12 \n"// r23 "fmla v8.4s, v19.4s, %15.s[0] \n" // r3 "prfm pldl1keep, [%4, #256] \n" "ld1 {v10.4s, v11.4s}, [%4] \n"// v10 v11 = r30 r34 "fmla v9.4s, v20.4s, %15.s[1] \n" "add %4, %4, #16 \n" "fmla v8.4s, v10.4s, %15.s[3] \n" "ext v12.16b, v10.16b, v11.16b, #4 \n"// r31 "fmla v9.4s, v11.4s, %16.s[3] \n" "ext 
v13.16b, v10.16b, v11.16b, #8 \n"// r32 "fmla v8.4s, v12.4s, %16.s[0] \n" "ext v14.16b, v10.16b, v11.16b, #12 \n"// r33 "fmla v9.4s, v13.4s, %16.s[1] \n" // r4 "prfm pldl1keep, [%5, #256] \n" "ld1 {v16.4s, v17.4s}, [%5] \n"// v16 v17 = r40 r44 "fmla v8.4s, v14.4s, %16.s[2] \n" "add %5, %5, #16 \n" "fmla v9.4s, v16.4s, %17.s[0] \n" "ext v18.16b, v16.16b, v17.16b, #4 \n"// r41 "fmla v8.4s, v17.4s, %18.s[0] \n" "ext v19.16b, v16.16b, v17.16b, #8 \n"// r42 "fmla v9.4s, v18.4s, %17.s[1] \n" "ext v20.16b, v16.16b, v17.16b, #12 \n"// r43 "fmla v8.4s, v19.4s, %17.s[2] \n" "fmla v9.4s, v20.4s, %17.s[3] \n" "fadd v8.4s, v8.4s, v9.4s \n" "st1 {v8.4s}, [%0], #16 \n" : "=r"(outptr), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4) // %5 : "0"(outptr), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "w"(_k0123), // %12 "w"(_k4567), // %13 "w"(_k891011), // %14 "w"(_k12131415), // %15 "w"(_k16171819), // %16 "w"(_k20212223), // %17 "w"(_k24242424), // %18 "w"(_bias0) // %19 : "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20" ); } #else if (nn > 0) { asm volatile( // r0 "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2] \n"// q10 q11 = r00 r04 "vmov q8, %q21 \n"// q8 = _bias0 "0: \n" "vmul.f32 q9, q10, %e14[0] \n" "vext.32 q12, q10, q11, #1 \n"// r01 "vmla.f32 q8, q11, %e15[0] \n" "vext.32 q13, q10, q11, #2 \n"// r02 "vmla.f32 q9, q12, %e14[1] \n" "vext.32 q12, q10, q11, #3 \n"// r03 "vmla.f32 q8, q13, %f14[0] \n" // r1 "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3] \n"// q14 q15 = r10 r14 "vmla.f32 q9, q12, %f14[1] \n" "add %3, #16 \n" "vmla.f32 q8, q14, %e15[1] \n" "vext.32 q12, q14, q15, #1 \n"// r11 "vmla.f32 q9, q15, %e16[1] \n" "vext.32 q13, q14, q15, #2 \n"// r12 "vmla.f32 q8, q12, %f15[0] \n" "vext.32 q12, q14, q15, #3 \n"// r13 "vmla.f32 q9, q13, %f15[1] \n" // r2 "pld [%4, #256] \n" "vld1.f32 {d20-d23}, [%4] \n"// q10 q11 = r20 r24 "vmla.f32 q8, q12, %e16[0] \n" "add %4, #16 \n" "vmla.f32 q9, q10, 
%f16[0] \n" "vext.32 q12, q10, q11, #1 \n"// r21 "vmla.f32 q8, q11, %f17[0] \n" "vext.32 q13, q10, q11, #2 \n"// r22 "vmla.f32 q9, q12, %f16[1] \n" "vext.32 q12, q10, q11, #3 \n"// r23 "vmla.f32 q8, q13, %e17[0] \n" // r3 "pld [%5, #256] \n" "vld1.f32 {d28-d31}, [%5] \n"// q14 q15 = r30 r34 "vmla.f32 q9, q12, %e17[1] \n" "add %5, #16 \n" "vmla.f32 q8, q14, %f17[1] \n" "vext.32 q12, q14, q15, #1 \n"// r31 "vmla.f32 q9, q15, %f18[1] \n" "vext.32 q13, q14, q15, #2 \n"// r32 "vmla.f32 q8, q12, %e18[0] \n" "vext.32 q12, q14, q15, #3 \n"// r33 "vmla.f32 q9, q13, %e18[1] \n" // r4 "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6] \n"// q10 q11 = r40 r44 "vmla.f32 q8, q12, %f18[0] \n" "add %6, #16 \n" "vmla.f32 q9, q10, %e19[0] \n" "vext.32 q12, q10, q11, #1 \n"// r41 "vmla.f32 q8, q11, %e20[0] \n" "vext.32 q13, q10, q11, #2 \n"// r42 "vmla.f32 q9, q12, %e19[1] \n" "vext.32 q12, q10, q11, #3 \n"// r43 "vmla.f32 q8, q13, %f19[0] \n" "add %2, #16 \n" "vmla.f32 q9, q12, %f19[1] \n" // r0 "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2] \n"// q10 q11 = r00 r04 "vadd.f32 q9, q9, q8 \n" "vmov q8, %q21 \n"// q8 = _bias0 "subs %0, #1 \n" "vst1.f32 {d18-d19}, [%1]! 
\n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4) // %6 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "w"(_k0123), // %14 "w"(_k4567), // %15 "w"(_k891011), // %16 "w"(_k12131415), // %17 "w"(_k16171819), // %18 "w"(_k20212223), // %19 "w"(_k24242424), // %20 "w"(_bias0) // %21 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON #if __aarch64__ // TODO neon assembly optimize float sum = bias0; float32x4_t _r0 = vld1q_f32(r0); float32x4_t _sum = vmulq_f32(_r0, _k0123); float32x4_t _r1 = vld1q_f32(r1); _sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1)); float32x4_t _r2 = vld1q_f32(r2); _sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2)); float32x4_t _r3 = vld1q_f32(r3); _sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3)); float32x4_t _r4 = vld1q_f32(r4); _sum = vmlaq_f32(_sum, _r4, _k20212223); float32x4_t _k_t4; _k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0); _k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1); _k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2); _k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3); float32x4_t _r_t4; _r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0); _r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1); _r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2); _r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3); _sum = vmlaq_f32(_sum, _r_t4, _k_t4); sum += r4[4] * k4[4]; float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); _ss = vpadd_f32(_ss, _ss); sum += vget_lane_f32(_ss, 0); *outptr = sum; r0++; r1++; r2++; r3++; r4++; outptr++; #else // TODO neon assembly optimize asm volatile( "veor q14, q14 \n" "vext.32 q14, %q19, q14, #3 \n"// q14 = bias0 0 0 0 "vld1.f32 {d16-d17}, [%1] \n"// q8 = r00 r01 r02 r03 "vld1.f32 {d18-d19}, [%2] \n"// q9 = r10 r11 r12 r13(X) "add r4, %1, #16 \n" "vld1.f32 {d19[1]}, [r4] \n" "vext.32 q9, q9, q9, #3 \n"// q9 = r04 r10 r11 r12 "vmla.f32 q14, q8, %q12 \n" "add r4, %2, #12 \n" "vld1.f32 {d20}, [r4] 
\n"// d20 = r13 r14 "vld1.f32 {d21}, [%3] \n"// d21 = r20 r21 "vmla.f32 q14, q9, %q13 \n" "add r4, %3, #8 \n" "vld1.f32 {d22-d23}, [r4] \n"// q11 = r22 r23 r24 X "vld1.f32 {d23[1]}, [%4] \n"// q11 = r22 r23 r24 r30 "vmla.f32 q14, q10, %q14 \n" "add r4, %4, #4 \n" "vld1.f32 {d24-d25}, [r4] \n"// q12 = r31 r32 r33 r34 "vmla.f32 q14, q11, %q15 \n" "vld1.f32 {d26-d27}, [%5] \n"// q13 = r40 r41 r42 r43 "vmla.f32 q14, q12, %q16 \n" "veor d30, d30 \n" "add r4, %5, #16 \n" "vld1.f32 {d30[0]}, [r4] \n"// d30 = r44 0 "vmla.f32 q14, q13, %q17 \n" "vmla.f32 d28, d30, %e18 \n" "add %1, #4 \n" // h-sum "vadd.f32 d28, d28, d29 \n" "add %2, #4 \n" "add %3, #4 \n" "vpadd.f32 d28, d28, d28 \n" "add %4, #4 \n" "add %5, #4 \n" "vst1.f32 {d28[0]}, [%0]! \n" : "=r"(outptr), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4) // %5 : "0"(outptr), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "w"(_k0123), // %12 "w"(_k4567), // %13 "w"(_k891011), // %14 "w"(_k12131415), // %15 "w"(_k16171819), // %16 "w"(_k20212223), // %17 "w"(_k24242424), // %18 "w"(_bias0) // %19 : "cc", "memory", "r4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else float sum = bias0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; *outptr = sum; r0++; r1++; r2++; r3++; r4++; outptr++; #endif } r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; } } } }
SybaseASE_fmt_plug.c
/* * Unicode conversion enhancements by magnum, 2011. Licensed as below. * * Sybase ASE hash support for version 15.0.2 and above, based on hmailserver * patch by James Nobis. * Hash format description : http://marcellmajor.com/sybase_sha256.html * Hacked together by Dhiru Kholia in February, 2011. * * This patch Copyright (C) 2010 by James Nobis - quel * quel NOSPAM quelrod NOSPAM net, and it is herby released to the general * public under the follow terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
 * Inspiration from the generic sha-1 and md5 (Copyright (c) 2010 by Solar Designer)
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_SybaseASE;
#elif FMT_REGISTERS_H
john_register_one(&fmt_SybaseASE);
#else

#include "sha2.h"
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "unicode.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "memdbg.h"

#define FORMAT_LABEL        "sybasease"
#define FORMAT_NAME         "Sybase ASE"
#define ALGORITHM_NAME      "SHA256 32/" ARCH_BITS_STR " " SHA2_LIB

#define BENCHMARK_COMMENT   ""
#define BENCHMARK_LENGTH    0

#define PLAINTEXT_LENGTH    64
/* hash string layout: "0xc007" prefix (6) + 8-byte salt as hex (16) +
 * 32-byte SHA-256 digest as hex (64) */
#define CIPHERTEXT_LENGTH   (6 + 16 + 64)
#define PREFIX_LENGTH       6

#define BINARY_SIZE         32
#define BINARY_ALIGN        4
#define SALT_SIZE           8
#define SALT_ALIGN          4

#define MIN_KEYS_PER_CRYPT  1
#define MAX_KEYS_PER_CRYPT  1

#define OMP_SCALE           256

static struct fmt_tests SybaseASE_tests[] = {
    {"0xc0074f9cc8c0d55d9803b0c0816e127f2a56ee080230af5b4ce3da1f3d9fcc5449fcfcf3fb9595eb8ea6", "test12"},
    {"0xc0074BE393C06BE420AD541671aa5e6f1a19a4a73bb51c59f45790f0887cfb70e0599747c6844d4556b3", "a"},
    {NULL}
};

/* One 518-byte buffer per candidate key: the password in UTF-16BE occupies the
 * front, bytes up to offset 510 stay zero-padded, and the 8-byte salt is
 * appended at offset 510 (see set_salt/crypt_all). */
static UTF16 (*prep_key)[518 / sizeof(UTF16)];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE/4];
static int kpc;          /* keys per crypt, cached from params at init time */

extern struct fmt_main fmt_SybaseASE;

/*
 * One-time format setup: scale key count for OpenMP, allocate the prepared-key
 * and digest buffers, and widen plaintext_length for UTF-8 input.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
    int omp_t;

    omp_t = omp_get_max_threads();
    self->params.min_keys_per_crypt *= omp_t;
    omp_t *= OMP_SCALE;
    self->params.max_keys_per_crypt *= omp_t;
#endif
    kpc = self->params.max_keys_per_crypt;
    /* prep_key must be zero-initialized: set_key() only clears the first
     * 2*PLAINTEXT_LENGTH bytes, the padding between there and the salt at
     * offset 510 relies on this calloc.  crypt_out is fully overwritten every
     * crypt_all(), so a plain alloc suffices. */
    prep_key = mem_calloc_tiny(sizeof(*prep_key) *
                               self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
    crypt_out = mem_alloc_tiny(sizeof(*crypt_out) *
                               self->params.max_keys_per_crypt, MEM_ALIGN_WORD);

    if (pers_opts.target_enc == UTF_8)
        fmt_SybaseASE.params.plaintext_length = 125;
}

/* Accept a hash if it carries the fixed "0xc007" prefix and has the exact
 * expected length.  TODO: strengthen checks (hex-digit validation of salt
 * and digest portions is not performed). */
static int valid(char *ciphertext, struct fmt_main *self)
{
    if (strncmp(ciphertext, "0xc007", 6) != 0)
        return 0;
    if (strlen(ciphertext) != CIPHERTEXT_LENGTH)
        return 0;
    return 1;
}

/* Decode the 64 hex digits after prefix+salt into the 32-byte raw digest.
 * Returns a pointer to a static buffer (standard for this format API). */
static void *get_binary(char *ciphertext)
{
    static unsigned char *out;
    int i;
    char *p = ciphertext + PREFIX_LENGTH + SALT_SIZE * 2;

    if (!out)
        out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);

    for (i = 0; i < BINARY_SIZE; i++) {
        out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
        p += 2;
    }
    return out;
}

/* Decode the 16 hex digits after the prefix into the 8 raw salt bytes. */
static void *salt(char *ciphertext)
{
    static union {
        unsigned char u8[SALT_SIZE];
        ARCH_WORD_32 u32;   /* forces word alignment for salt_hash() */
    } out;
    int i;
    char *p = ciphertext + PREFIX_LENGTH;

    for (i = 0; i < sizeof(out.u8); i++) {
        out.u8[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
        p += 2;
    }
    return out.u8;
}

/* Partial-hash lookups over the first 32 bits of the computed digest. */
static int get_hash_0(int index) { return crypt_out[index][0] & 0xF; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xFF; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xFFF; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xFFFF; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xFFFFF; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xFFFFFF; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7FFFFFF; }

/* Copy the current 8-byte salt into every prepared-key buffer. */
static void set_salt(void *salt)
{
    int index;

    for (index = 0; index < kpc; index++) {
        /* append salt at offset 510 */
        memcpy((unsigned char*)prep_key[index] + 510, (unsigned char*)salt, 8);
    }
}

static void set_key(char *key, int index)
{
    /* Clean slate */
    memset(prep_key[index], 0, 2 * PLAINTEXT_LENGTH);

    /* convert key to UTF-16BE, --encoding aware */
    enc_to_utf16_be(prep_key[index], PLAINTEXT_LENGTH, (UTF8*)key,
                    strlen(key));
}

/* Recover the plaintext from the stored UTF-16BE buffer for display. */
static char *get_key(int index)
{
    UTF16 key_le[PLAINTEXT_LENGTH + 1];
    UTF16 *s = prep_key[index];
    UTF16 *d = key_le;

    // Byte-swap back to UTF-16LE
    while ((*d++ = *s >> 8 | *s << 8))
        s++;

    return (char*)utf16_to_enc(key_le);
}

/*
 * Core hash: SHA-256 over the whole 518-byte prepared buffer
 * (UTF-16BE password + zero padding + salt at offset 510).
 * Note: without OpenMP the for-loop is compiled out and only index 0 is
 * hashed, which matches MAX_KEYS_PER_CRYPT == 1 for non-OMP builds.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
    int count = *pcount;
    int index = 0;

#ifdef _OPENMP
#pragma omp parallel for default(none) private(index) shared(count, crypt_out, prep_key)
    for (index = 0; index < count; index++)
#endif
    {
        SHA256_CTX ctx;

        SHA256_Init(&ctx);
        SHA256_Update(&ctx, prep_key[index], 518);
        SHA256_Final((unsigned char *)crypt_out[index], &ctx);
    }
    return count;
}

/* Quick first-word comparison against all computed digests (loop only
 * present when multiple keys per crypt are possible, i.e. OpenMP builds). */
static int cmp_all(void *binary, int count)
{
    int index = 0;

#ifdef _OPENMP
    for (index = 0; index < count; index++)
#endif
        if (*(ARCH_WORD_32 *)binary == *(ARCH_WORD_32 *)crypt_out[index])
            return 1;
    return 0;
}

/* Full 32-byte digest comparison for one candidate. */
static int cmp_one(void *binary, int index)
{
    return !memcmp((char *)binary, (const char*)crypt_out[index], BINARY_SIZE);
}

/* cmp_one already compares the full binary, so nothing further to verify. */
static int cmp_exact(char *source, int index)
{
    return 1;
}

static int salt_hash(void *salt)
{
    return *(ARCH_WORD_32*)salt & (SALT_HASH_SIZE - 1);
}

struct fmt_main fmt_SybaseASE = {
    {
        FORMAT_LABEL,
        FORMAT_NAME,
        ALGORITHM_NAME,
        BENCHMARK_COMMENT,
        BENCHMARK_LENGTH,
        PLAINTEXT_LENGTH,
        BINARY_SIZE,
        BINARY_ALIGN,
        SALT_SIZE,
        SALT_ALIGN,
        MIN_KEYS_PER_CRYPT,
        MAX_KEYS_PER_CRYPT,
        FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_UNICODE | FMT_UTF8,
#if FMT_MAIN_VERSION > 11
        { NULL },
#endif
        SybaseASE_tests
    }, {
        init,
        fmt_default_done,
        fmt_default_reset,
        fmt_default_prepare,
        valid,
        fmt_default_split,
        get_binary,
        salt,
#if FMT_MAIN_VERSION > 11
        { NULL },
#endif
        fmt_default_source,
        {
            fmt_default_binary_hash_0,
            fmt_default_binary_hash_1,
            fmt_default_binary_hash_2,
            fmt_default_binary_hash_3,
            fmt_default_binary_hash_4,
            fmt_default_binary_hash_5,
            fmt_default_binary_hash_6
        },
        salt_hash,
        set_salt,
        set_key,
        get_key,
        fmt_default_clear_keys,
        crypt_all,
        {
            get_hash_0,
            get_hash_1,
            get_hash_2,
            get_hash_3,
            get_hash_4,
            get_hash_5,
            get_hash_6
        },
        cmp_all,
        cmp_one,
        cmp_exact
    }
};

#endif /* plugin stanza */
update_ops_matrix_dense_single.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "constant.h"
#include "update_ops.h"
#include "utility.h"
#ifdef _OPENMP
#include <omp.h>
#endif

#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif

//void single_qubit_dense_matrix_gate_old_single(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim);
//void single_qubit_dense_matrix_gate_old_parallel(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim);
//void single_qubit_dense_matrix_gate_single(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim);
//void single_qubit_dense_matrix_gate_parallel(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim);

/*
 * Apply a dense 2x2 complex matrix to one qubit of a state vector of
 * dimension dim, dispatching to the best available implementation:
 * SIMD when compiled with _USE_SIMD, and (with OpenMP) the parallel
 * variant once dim reaches 2^13 amplitudes, below which threading
 * overhead is not worthwhile.
 * matrix is row-major: {m00, m01, m10, m11}.
 */
void single_qubit_dense_matrix_gate(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
	//single_qubit_dense_matrix_gate_old_single(target_qubit_index, matrix, state, dim);
	//single_qubit_dense_matrix_gate_old_parallel(target_qubit_index, matrix, state, dim);
	//single_qubit_dense_matrix_gate_single(target_qubit_index, matrix, state, dim);
	//single_qubit_dense_matrix_gate_single_unroll(target_qubit_index, matrix, state, dim);
	//single_qubit_dense_matrix_gate_single_simd(target_qubit_index, matrix, state, dim);
	//single_qubit_dense_matrix_gate_parallel_simd(target_qubit_index, matrix, state, dim);
	//return;

#ifdef _USE_SIMD
#ifdef _OPENMP
	UINT threshold = 13;	/* parallelize only when dim >= 2^13 */
	if (dim < (((ITYPE)1) << threshold)) {
		single_qubit_dense_matrix_gate_single_simd(target_qubit_index, matrix, state, dim);
	}
	else {
		single_qubit_dense_matrix_gate_parallel_simd(target_qubit_index, matrix, state, dim);
	}
#else
	single_qubit_dense_matrix_gate_single_simd(target_qubit_index, matrix, state, dim);
#endif
#else
#ifdef _OPENMP
	UINT threshold = 13;	/* same size cutoff for the scalar path */
	if (dim < (((ITYPE)1) << threshold)) {
		//single_qubit_dense_matrix_gate_single_unroll(target_qubit_index, matrix, state, dim);
		single_qubit_dense_matrix_gate_single(target_qubit_index, matrix, state, dim);
	}
	else {
		//single_qubit_dense_matrix_gate_parallel_unroll(target_qubit_index, matrix, state, dim);
		single_qubit_dense_matrix_gate_parallel(target_qubit_index, matrix, state, dim);
	}
#else
	//single_qubit_dense_matrix_gate_single_unroll(target_qubit_index, matrix, state, dim);
	single_qubit_dense_matrix_gate_single(target_qubit_index, matrix, state, dim);
#endif
#endif
}

/*
 * Scalar reference implementation, single-threaded.
 * For each of dim/2 iterations, basis_0 is formed by inserting a 0 bit at
 * target_qubit_index into state_index (split via mask_low/mask_high), and
 * basis_1 = basis_0 with that bit set; the 2x2 matrix then mixes that
 * amplitude pair in place.
 */
void single_qubit_dense_matrix_gate_single(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
	const ITYPE loop_dim = dim / 2;
	const ITYPE mask = (1ULL << target_qubit_index);
	const ITYPE mask_low = mask - 1;
	const ITYPE mask_high = ~mask_low;
	ITYPE state_index = 0;
	for (state_index = 0; state_index < loop_dim; ++state_index) {
		/* insert a zero bit at the target position */
		ITYPE basis_0 = (state_index&mask_low) + ((state_index&mask_high) << 1);
		ITYPE basis_1 = basis_0 + mask;
		// fetch values
		CTYPE cval_0 = state[basis_0];
		CTYPE cval_1 = state[basis_1];
		// set values
		state[basis_0] = matrix[0] * cval_0 + matrix[1] * cval_1;
		state[basis_1] = matrix[2] * cval_0 + matrix[3] * cval_1;
	}
}

#ifdef _OPENMP
/*
 * OpenMP version of the scalar implementation. Iterations are independent
 * (each touches a disjoint amplitude pair), so a plain parallel-for is safe.
 */
void single_qubit_dense_matrix_gate_parallel(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
	const ITYPE loop_dim = dim / 2;
	const ITYPE mask = (1ULL << target_qubit_index);
	const ITYPE mask_low = mask - 1;
	const ITYPE mask_high = ~mask_low;
	ITYPE state_index = 0;
#pragma omp parallel for
	for (state_index = 0; state_index < loop_dim; ++state_index) {
		ITYPE basis_0 = (state_index&mask_low) + ((state_index&mask_high) << 1);
		ITYPE basis_1 = basis_0 + mask;
		// fetch values
		CTYPE cval_0 = state[basis_0];
		CTYPE cval_1 = state[basis_1];
		// set values
		state[basis_0] = matrix[0] * cval_0 + matrix[1] * cval_1;
		state[basis_1] = matrix[2] * cval_0 + matrix[3] * cval_1;
	}
}
#endif

/*
 * Scalar implementation unrolled by a factor of two.
 * target == 0: amplitude pairs are adjacent, so iterate directly over basis.
 * target != 0: each iteration handles two consecutive basis_0 values
 * (basis_0 and basis_0+1 lie in the same half since mask >= 2).
 */
void single_qubit_dense_matrix_gate_single_unroll(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
	const ITYPE loop_dim = dim / 2;
	const ITYPE mask = (1ULL << target_qubit_index);
	const ITYPE mask_low = mask - 1;
	const ITYPE mask_high = ~mask_low;
	if (target_qubit_index == 0) {
		ITYPE basis = 0;
		for (basis = 0; basis < dim; basis += 2) {
			CTYPE val0a = state[basis];
			CTYPE val1a = state[basis + 1];
			CTYPE res0a = val0a * matrix[0] + val1a * matrix[1];
			CTYPE res1a = val0a * matrix[2] + val1a * matrix[3];
			state[basis] = res0a;
			state[basis + 1] = res1a;
		}
	}
	else {
		ITYPE state_index = 0;
		for (state_index = 0; state_index < loop_dim; state_index += 2) {
			ITYPE basis_0 = (state_index&mask_low) + ((state_index&mask_high) << 1);
			ITYPE basis_1 = basis_0 + mask;
			CTYPE val0a = state[basis_0];
			CTYPE val0b = state[basis_0 + 1];
			CTYPE val1a = state[basis_1];
			CTYPE val1b = state[basis_1 + 1];
			CTYPE res0a = val0a * matrix[0] + val1a * matrix[1];
			CTYPE res1b = val0b * matrix[2] + val1b * matrix[3];
			CTYPE res1a = val0a * matrix[2] + val1a * matrix[3];
			CTYPE res0b = val0b * matrix[0] + val1b * matrix[1];
			state[basis_0] = res0a;
			state[basis_0 + 1] = res0b;
			state[basis_1] = res1a;
			state[basis_1 + 1] = res1b;
		}
	}
}

#ifdef _OPENMP
/* OpenMP version of the two-way unrolled scalar implementation (same index
 * scheme as single_qubit_dense_matrix_gate_single_unroll). */
void single_qubit_dense_matrix_gate_parallel_unroll(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
	const ITYPE loop_dim = dim / 2;
	const ITYPE mask = (1ULL << target_qubit_index);
	const ITYPE mask_low = mask - 1;
	const ITYPE mask_high = ~mask_low;
	if (target_qubit_index == 0) {
		ITYPE basis = 0;
#pragma omp parallel for
		for (basis = 0; basis < dim; basis += 2) {
			CTYPE val0a = state[basis];
			CTYPE val1a = state[basis + 1];
			CTYPE res0a = val0a * matrix[0] + val1a * matrix[1];
			CTYPE res1a = val0a * matrix[2] + val1a * matrix[3];
			state[basis] = res0a;
			state[basis + 1] = res1a;
		}
	}
	else {
		ITYPE state_index = 0;
#pragma omp parallel for
		for (state_index = 0; state_index < loop_dim; state_index += 2) {
			ITYPE basis_0 = (state_index&mask_low) + ((state_index&mask_high) << 1);
			ITYPE basis_1 = basis_0 + mask;
			CTYPE val0a = state[basis_0];
			CTYPE val0b = state[basis_0 + 1];
			CTYPE val1a = state[basis_1];
			CTYPE val1b = state[basis_1 + 1];
			CTYPE res0a = val0a * matrix[0] + val1a * matrix[1];
			CTYPE res1b = val0b * matrix[2] + val1b * matrix[3];
			CTYPE res1a = val0a * matrix[2] + val1a * matrix[3];
			CTYPE res0b = val0b * matrix[0] + val1b * matrix[1];
			state[basis_0] = res0a;
			state[basis_0 + 1] = res0b;
			state[basis_1] = res1a;
			state[basis_1 + 1] = res1b;
		}
	}
}
#endif

#ifdef _USE_SIMD
/*
 * AVX2 implementation. A 256-bit register holds two complex doubles as
 * (re, im, re, im). Each matrix element m is pre-expanded into two
 * constant vectors: one built from (re(m), -im(m)) pairs and one from
 * (im(m), re(m)) pairs (note _mm256_set_pd lists lanes high-to-low), so
 * that mul + hadd yields the real and imaginary parts of the complex
 * product; permute4x64 with immediate 216 (= lane order 0,2,1,3)
 * restores the interleaved layout.
 * target == 0: one register covers a full amplitude pair (mv vectors mix
 * matrix rows). target != 0: registers from the two halves are combined
 * (per-element mv vectors), needing no cross-lane permute.
 */
void single_qubit_dense_matrix_gate_single_simd(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
	const ITYPE loop_dim = dim / 2;
	const ITYPE mask = (1ULL << target_qubit_index);
	const ITYPE mask_low = mask - 1;
	const ITYPE mask_high = ~mask_low;
	if (target_qubit_index == 0) {
		ITYPE basis = 0;
		__m256d mv00 = _mm256_set_pd(-cimag(matrix[1]), creal(matrix[1]), -cimag(matrix[0]), creal(matrix[0]));
		__m256d mv01 = _mm256_set_pd(creal(matrix[1]), cimag(matrix[1]), creal(matrix[0]), cimag(matrix[0]));
		__m256d mv20 = _mm256_set_pd(-cimag(matrix[3]), creal(matrix[3]), -cimag(matrix[2]), creal(matrix[2]));
		__m256d mv21 = _mm256_set_pd(creal(matrix[3]), cimag(matrix[3]), creal(matrix[2]), cimag(matrix[2]));
		for (basis = 0; basis < dim; basis += 2) {
			double* ptr = (double*)(state + basis);
			__m256d data = _mm256_loadu_pd(ptr);

			__m256d data_u0 = _mm256_mul_pd(data, mv00);
			__m256d data_u1 = _mm256_mul_pd(data, mv01);
			__m256d data_u2 = _mm256_hadd_pd(data_u0, data_u1);
			data_u2 = _mm256_permute4x64_pd(data_u2, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216

			__m256d data_d0 = _mm256_mul_pd(data, mv20);
			__m256d data_d1 = _mm256_mul_pd(data, mv21);
			__m256d data_d2 = _mm256_hadd_pd(data_d0, data_d1);
			data_d2 = _mm256_permute4x64_pd(data_d2, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216

			__m256d data_r = _mm256_hadd_pd(data_u2, data_d2);
			data_r = _mm256_permute4x64_pd(data_r, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216
			_mm256_storeu_pd(ptr, data_r);
		}
	}
	else {
		ITYPE state_index = 0;
		__m256d mv00 = _mm256_set_pd(-cimag(matrix[0]), creal(matrix[0]), -cimag(matrix[0]), creal(matrix[0]));
		__m256d mv01 = _mm256_set_pd(creal(matrix[0]), cimag(matrix[0]), creal(matrix[0]), cimag(matrix[0]));
		__m256d mv10 = _mm256_set_pd(-cimag(matrix[1]), creal(matrix[1]), -cimag(matrix[1]), creal(matrix[1]));
		__m256d mv11 = _mm256_set_pd(creal(matrix[1]), cimag(matrix[1]), creal(matrix[1]), cimag(matrix[1]));
		__m256d mv20 = _mm256_set_pd(-cimag(matrix[2]), creal(matrix[2]), -cimag(matrix[2]), creal(matrix[2]));
		__m256d mv21 = _mm256_set_pd(creal(matrix[2]), cimag(matrix[2]), creal(matrix[2]), cimag(matrix[2]));
		__m256d mv30 = _mm256_set_pd(-cimag(matrix[3]), creal(matrix[3]), -cimag(matrix[3]), creal(matrix[3]));
		__m256d mv31 = _mm256_set_pd(creal(matrix[3]), cimag(matrix[3]), creal(matrix[3]), cimag(matrix[3]));
		for (state_index = 0; state_index < loop_dim; state_index += 2) {
			ITYPE basis_0 = (state_index&mask_low) + ((state_index&mask_high) << 1);
			ITYPE basis_1 = basis_0 + mask;
			double* ptr0 = (double*)(state + basis_0);
			double* ptr1 = (double*)(state + basis_1);
			__m256d data0 = _mm256_loadu_pd(ptr0);
			__m256d data1 = _mm256_loadu_pd(ptr1);

			__m256d data_u2 = _mm256_mul_pd(data0, mv00);
			__m256d data_u3 = _mm256_mul_pd(data1, mv10);
			__m256d data_u4 = _mm256_mul_pd(data0, mv01);
			__m256d data_u5 = _mm256_mul_pd(data1, mv11);

			__m256d data_u6 = _mm256_hadd_pd(data_u2, data_u4);
			__m256d data_u7 = _mm256_hadd_pd(data_u3, data_u5);

			__m256d data_d2 = _mm256_mul_pd(data0, mv20);
			__m256d data_d3 = _mm256_mul_pd(data1, mv30);
			__m256d data_d4 = _mm256_mul_pd(data0, mv21);
			__m256d data_d5 = _mm256_mul_pd(data1, mv31);

			__m256d data_d6 = _mm256_hadd_pd(data_d2, data_d4);
			__m256d data_d7 = _mm256_hadd_pd(data_d3, data_d5);

			__m256d data_r0 = _mm256_add_pd(data_u6, data_u7);
			__m256d data_r1 = _mm256_add_pd(data_d6, data_d7);

			_mm256_storeu_pd(ptr0, data_r0);
			_mm256_storeu_pd(ptr1, data_r1);
		}
	}
}

#ifdef _OPENMP
/* OpenMP version of the AVX2 implementation; identical lane layout and
 * arithmetic to single_qubit_dense_matrix_gate_single_simd. */
void single_qubit_dense_matrix_gate_parallel_simd(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
	const ITYPE loop_dim = dim / 2;
	const ITYPE mask = (1ULL << target_qubit_index);
	const ITYPE mask_low = mask - 1;
	const ITYPE mask_high = ~mask_low;
	if (target_qubit_index == 0) {
		ITYPE basis = 0;
		__m256d mv00 = _mm256_set_pd(-cimag(matrix[1]), creal(matrix[1]), -cimag(matrix[0]), creal(matrix[0]));
		__m256d mv01 = _mm256_set_pd(creal(matrix[1]), cimag(matrix[1]), creal(matrix[0]), cimag(matrix[0]));
		__m256d mv20 = _mm256_set_pd(-cimag(matrix[3]), creal(matrix[3]), -cimag(matrix[2]), creal(matrix[2]));
		__m256d mv21 = _mm256_set_pd(creal(matrix[3]), cimag(matrix[3]), creal(matrix[2]), cimag(matrix[2]));
#pragma omp parallel for
		for (basis = 0; basis < dim; basis += 2) {
			double* ptr = (double*)(state + basis);
			__m256d data = _mm256_loadu_pd(ptr);

			__m256d data_u0 = _mm256_mul_pd(data, mv00);
			__m256d data_u1 = _mm256_mul_pd(data, mv01);
			__m256d data_u2 = _mm256_hadd_pd(data_u0, data_u1);
			data_u2 = _mm256_permute4x64_pd(data_u2, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216

			__m256d data_d0 = _mm256_mul_pd(data, mv20);
			__m256d data_d1 = _mm256_mul_pd(data, mv21);
			__m256d data_d2 = _mm256_hadd_pd(data_d0, data_d1);
			data_d2 = _mm256_permute4x64_pd(data_d2, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216

			__m256d data_r = _mm256_hadd_pd(data_u2, data_d2);
			data_r = _mm256_permute4x64_pd(data_r, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216
			_mm256_storeu_pd(ptr, data_r);
		}
	}
	else {
		ITYPE state_index = 0;
		__m256d mv00 = _mm256_set_pd(-cimag(matrix[0]), creal(matrix[0]), -cimag(matrix[0]), creal(matrix[0]));
		__m256d mv01 = _mm256_set_pd(creal(matrix[0]), cimag(matrix[0]), creal(matrix[0]), cimag(matrix[0]));
		__m256d mv10 = _mm256_set_pd(-cimag(matrix[1]), creal(matrix[1]), -cimag(matrix[1]), creal(matrix[1]));
		__m256d mv11 = _mm256_set_pd(creal(matrix[1]), cimag(matrix[1]), creal(matrix[1]), cimag(matrix[1]));
		__m256d mv20 = _mm256_set_pd(-cimag(matrix[2]), creal(matrix[2]), -cimag(matrix[2]), creal(matrix[2]));
		__m256d mv21 = _mm256_set_pd(creal(matrix[2]), cimag(matrix[2]), creal(matrix[2]), cimag(matrix[2]));
		__m256d mv30 = _mm256_set_pd(-cimag(matrix[3]), creal(matrix[3]), -cimag(matrix[3]), creal(matrix[3]));
		__m256d mv31 = _mm256_set_pd(creal(matrix[3]), cimag(matrix[3]), creal(matrix[3]), cimag(matrix[3]));
#pragma omp parallel for
		for (state_index = 0; state_index < loop_dim; state_index += 2) {
			ITYPE basis_0 = (state_index&mask_low) + ((state_index&mask_high) << 1);
			ITYPE basis_1 = basis_0 + mask;
			double* ptr0 = (double*)(state + basis_0);
			double* ptr1 = (double*)(state + basis_1);
			__m256d data0 = _mm256_loadu_pd(ptr0);
			__m256d data1 = _mm256_loadu_pd(ptr1);

			__m256d data_u2 = _mm256_mul_pd(data0, mv00);
			__m256d data_u3 = _mm256_mul_pd(data1, mv10);
			__m256d data_u4 = _mm256_mul_pd(data0, mv01);
			__m256d data_u5 = _mm256_mul_pd(data1, mv11);

			__m256d data_u6 = _mm256_hadd_pd(data_u2, data_u4);
			__m256d data_u7 = _mm256_hadd_pd(data_u3, data_u5);

			__m256d data_d2 = _mm256_mul_pd(data0, mv20);
			__m256d data_d3 = _mm256_mul_pd(data1, mv30);
			__m256d data_d4 = _mm256_mul_pd(data0, mv21);
			__m256d data_d5 = _mm256_mul_pd(data1, mv31);

			__m256d data_d6 = _mm256_hadd_pd(data_d2, data_d4);
			__m256d data_d7 = _mm256_hadd_pd(data_d3, data_d5);

			__m256d data_r0 = _mm256_add_pd(data_u6, data_u7);
			__m256d data_r1 = _mm256_add_pd(data_d6, data_d7);

			_mm256_storeu_pd(ptr0, data_r0);
			_mm256_storeu_pd(ptr1, data_r1);
		}
	}
}
#endif
#endif

/* Legacy implementations kept for reference (disabled). */
/*
void single_qubit_dense_matrix_gate_old_single(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
	// target mask
	const ITYPE target_mask = 1ULL << target_qubit_index;

	// loop variables
	const ITYPE loop_dim = dim / 2;
	ITYPE state_index;

	for (state_index = 0; state_index < loop_dim; ++state_index) {
		// create index
		ITYPE basis_0 = insert_zero_to_basis_index(state_index, target_mask, target_qubit_index);

		// gather index
		ITYPE basis_1 = basis_0 ^ target_mask;

		// fetch values
		CTYPE cval_0 = state[basis_0];
		CTYPE cval_1 = state[basis_1];

		// set values
		state[basis_0] = matrix[0] * cval_0 + matrix[1] * cval_1;
		state[basis_1] = matrix[2] * cval_0 + matrix[3] * cval_1;
	}
}

#ifdef _OPENMP
void single_qubit_dense_matrix_gate_old_parallel(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
	// target mask
	const ITYPE target_mask = 1ULL << target_qubit_index;

	// loop variables
	const ITYPE loop_dim = dim / 2;
	ITYPE state_index;

#pragma omp parallel for
	for (state_index = 0; state_index < loop_dim; ++state_index) {
		// create index
		ITYPE basis_0 = insert_zero_to_basis_index(state_index, target_mask, target_qubit_index);

		// gather index
		ITYPE basis_1 = basis_0 ^ target_mask;

		// fetch values
		CTYPE cval_0 = state[basis_0];
		CTYPE cval_1 = state[basis_1];

		// set values
		state[basis_0] = matrix[0] * cval_0 + matrix[1] * cval_1;
		state[basis_1] = matrix[2] * cval_0 + matrix[3] * cval_1;
	}
}
#endif
*/
GB_binop__pair_fp32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__pair_fp32)
// A.*B function (eWiseMult):       GB ((none))
// A.*B function (eWiseMult):       GB ((none))
// A.*B function (eWiseMult):       GB ((none))
// A.*B function (eWiseMult):       GB ((none))
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__pair_fp32)
// C+=b function (dense accum):     GB (_Cdense_accumb__pair_fp32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__pair_fp32)
// C=scalar+B                       GB ((none))
// C=scalar+B'                      GB ((none))
// C=A+scalar                       GB ((none))
// C=A'+scalar                      GB ((none))

// C type:   float
// A type:   float
// B,b type: float

// BinaryOp: cij = 1
// (PAIR ignores both inputs: the result is the constant 1, which is why
// GB_GETA and GB_GETB below expand to empty statements and most variants
// of this operator are disabled as "(none)".)

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    ;

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    ;

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = 1 ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PAIR || GxB_NO_FP32 || GxB_NO_PAIR_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The numeric work is done by the included template, specialized by the
// macros defined above.
GrB_Info GB (_Cdense_ewise3_noaccum__pair_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pair_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pair_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pair_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float   x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float   y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    ;                               \
    ;                               \
    Cx [pC] = 1 ;                   \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    ;                               \
    ;                               \
    Cx [pC] = 1 ;                   \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 24; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,2);t1++) { lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4)); ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(max(0,ceild(t1-2,3)),ceild(2*t1-2*t2-1,3)),ceild(16*t2-Nz-11,24));t3<=min(min(min(floord(4*Nt+Ny-9,24),floord(8*t1+Ny+7,24)),floord(16*t2+Ny+3,24)),floord(16*t1-16*t2+Nz+Ny+5,24));t3++) { for (t4=max(max(max(0,ceild(t1-15,16)),ceild(16*t2-Nz-115,128)),ceild(24*t3-Ny-115,128));t4<=min(min(min(min(floord(4*Nt+Nx-9,128),floord(8*t1+Nx+7,128)),floord(16*t2+Nx+3,128)),floord(24*t3+Nx+11,128)),floord(16*t1-16*t2+Nz+Nx+5,128));t4++) { for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(128*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),Nt-1),2*t1+3),4*t2+2),6*t3+4),32*t4+30);t5++) { for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) { lbv=max(128*t4,4*t5+4); ubv=min(128*t4+127,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ 
(-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ 
free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
vision.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % V V IIIII SSSSS IIIII OOO N N % % V V I SS I O O NN N % % V V I SSS I O O N N N % % V V I SS I O O N NN % % V IIIII SSSSS IIIII OOO N N % % % % % % MagickCore Computer Vision Methods % % % % Software Design % % Cristy % % September 2014 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include 
"MagickCore/morphology.h" #include "MagickCore/morphology-private.h" #include "MagickCore/opencl-private.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/resource_.h" #include "MagickCore/signature-private.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/vision.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n n e c t e d C o m p o n e n t s I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConnectedComponentsImage() returns the connected-components of the image % uniquely labeled. The returned connected components image colors member % defines the number of unique objects. Choose from 4 or 8-way connectivity. % % You are responsible for freeing the connected components objects resources % with this statement; % % objects = (CCObjectInfo *) RelinquishMagickMemory(objects); % % The format of the ConnectedComponentsImage method is: % % Image *ConnectedComponentsImage(const Image *image, % const size_t connectivity,CCObjectInfo **objects, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o connectivity: how many neighbors to visit, choose from 4 or 8. % % o objects: return the attributes of each unique object. % % o exception: return any errors or warnings in this structure. 
% */ static int CCObjectInfoCompare(const void *x,const void *y) { CCObjectInfo *p, *q; p=(CCObjectInfo *) x; q=(CCObjectInfo *) y; return((int) (q->area-(ssize_t) p->area)); } MagickExport Image *ConnectedComponentsImage(const Image *image, const size_t connectivity,CCObjectInfo **objects,ExceptionInfo *exception) { #define ConnectedComponentsImageTag "ConnectedComponents/Image" CacheView *image_view, *component_view; CCObjectInfo *object; char *c; const char *artifact; double area_threshold; Image *component_image; MagickBooleanType status; MagickOffsetType progress; MatrixInfo *equivalences; register ssize_t i; size_t size; ssize_t first, last, n, step, y; /* Initialize connected components image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (objects != (CCObjectInfo **) NULL) *objects=(CCObjectInfo *) NULL; component_image=CloneImage(image,0,0,MagickTrue, exception); if (component_image == (Image *) NULL) return((Image *) NULL); component_image->depth=MAGICKCORE_QUANTUM_DEPTH; if (AcquireImageColormap(component_image,MaxColormapSize,exception) == MagickFalse) { component_image=DestroyImage(component_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Initialize connected components equivalences. 
*/ size=image->columns*image->rows; if (image->columns != (size/image->rows)) { component_image=DestroyImage(component_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } equivalences=AcquireMatrixInfo(size,1,sizeof(ssize_t),exception); if (equivalences == (MatrixInfo *) NULL) { component_image=DestroyImage(component_image); return((Image *) NULL); } for (n=0; n < (ssize_t) (image->columns*image->rows); n++) (void) SetMatrixElement(equivalences,n,0,&n); object=(CCObjectInfo *) AcquireQuantumMemory(MaxColormapSize,sizeof(*object)); if (object == (CCObjectInfo *) NULL) { equivalences=DestroyMatrixInfo(equivalences); component_image=DestroyImage(component_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) memset(object,0,MaxColormapSize*sizeof(*object)); for (i=0; i < (ssize_t) MaxColormapSize; i++) { object[i].id=i; object[i].bounding_box.x=(ssize_t) image->columns; object[i].bounding_box.y=(ssize_t) image->rows; GetPixelInfo(image,&object[i].color); } /* Find connected components. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); for (n=0; n < (ssize_t) (connectivity > 4 ? 4 : 2); n++) { ssize_t connect4[2][2] = { { -1, 0 }, { 0, -1 } }, connect8[4][2] = { { -1, -1 }, { -1, 0 }, { -1, 1 }, { 0, -1 } }, dx, dy; if (status == MagickFalse) continue; dy=connectivity > 4 ? connect8[n][0] : connect4[n][0]; dx=connectivity > 4 ? connect8[n][1] : connect4[n][1]; for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y-1,image->columns,3,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } p+=GetPixelChannels(image)*image->columns; for (x=0; x < (ssize_t) image->columns; x++) { PixelInfo pixel, target; ssize_t neighbor_offset, obj, offset, ox, oy, root; /* Is neighbor an authentic pixel and a different color than the pixel? 
*/ GetPixelInfoPixel(image,p,&pixel); if (((x+dx) < 0) || ((x+dx) >= (ssize_t) image->columns) || ((y+dy) < 0) || ((y+dy) >= (ssize_t) image->rows)) { p+=GetPixelChannels(image); continue; } neighbor_offset=dy*(GetPixelChannels(image)*image->columns)+dx* GetPixelChannels(image); GetPixelInfoPixel(image,p+neighbor_offset,&target); if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { p+=GetPixelChannels(image); continue; } /* Resolve this equivalence. */ offset=y*image->columns+x; neighbor_offset=dy*image->columns+dx; ox=offset; status=GetMatrixElement(equivalences,ox,0,&obj); while (obj != ox) { ox=obj; status=GetMatrixElement(equivalences,ox,0,&obj); } oy=offset+neighbor_offset; status=GetMatrixElement(equivalences,oy,0,&obj); while (obj != oy) { oy=obj; status=GetMatrixElement(equivalences,oy,0,&obj); } if (ox < oy) { status=SetMatrixElement(equivalences,oy,0,&ox); root=ox; } else { status=SetMatrixElement(equivalences,ox,0,&oy); root=oy; } ox=offset; status=GetMatrixElement(equivalences,ox,0,&obj); while (obj != root) { status=GetMatrixElement(equivalences,ox,0,&obj); status=SetMatrixElement(equivalences,ox,0,&root); } oy=offset+neighbor_offset; status=GetMatrixElement(equivalences,oy,0,&obj); while (obj != root) { status=GetMatrixElement(equivalences,oy,0,&obj); status=SetMatrixElement(equivalences,oy,0,&root); } status=SetMatrixElement(equivalences,y*image->columns+x,0,&root); p+=GetPixelChannels(image); } } } image_view=DestroyCacheView(image_view); /* Label connected components. 
*/ n=0; image_view=AcquireVirtualCacheView(image,exception); component_view=AcquireAuthenticCacheView(component_image,exception); for (y=0; y < (ssize_t) component_image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(component_view,0,y,component_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) component_image->columns; x++) { ssize_t id, offset; offset=y*image->columns+x; status=GetMatrixElement(equivalences,offset,0,&id); if (id != offset) status=GetMatrixElement(equivalences,id,0,&id); else { id=n++; if (id >= (ssize_t) MaxColormapSize) break; } status=SetMatrixElement(equivalences,offset,0,&id); if (x < object[id].bounding_box.x) object[id].bounding_box.x=x; if (x >= (ssize_t) object[id].bounding_box.width) object[id].bounding_box.width=(size_t) x; if (y < object[id].bounding_box.y) object[id].bounding_box.y=y; if (y >= (ssize_t) object[id].bounding_box.height) object[id].bounding_box.height=(size_t) y; object[id].color.red+=QuantumScale*GetPixelRed(image,p); object[id].color.green+=QuantumScale*GetPixelGreen(image,p); object[id].color.blue+=QuantumScale*GetPixelBlue(image,p); if (image->alpha_trait != UndefinedPixelTrait) object[id].color.alpha+=QuantumScale*GetPixelAlpha(image,p); if (image->colorspace == CMYKColorspace) object[id].color.black+=QuantumScale*GetPixelBlack(image,p); object[id].centroid.x+=x; object[id].centroid.y+=y; object[id].area++; SetPixelIndex(component_image,(Quantum) id,q); p+=GetPixelChannels(image); q+=GetPixelChannels(component_image); } if (n > (ssize_t) MaxColormapSize) break; if (SyncCacheViewAuthenticPixels(component_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) 
{ MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ConnectedComponentsImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } component_view=DestroyCacheView(component_view); image_view=DestroyCacheView(image_view); equivalences=DestroyMatrixInfo(equivalences); if (n > (ssize_t) MaxColormapSize) { object=(CCObjectInfo *) RelinquishMagickMemory(object); component_image=DestroyImage(component_image); ThrowImageException(ResourceLimitError,"TooManyObjects"); } component_image->colors=(size_t) n; for (i=0; i < (ssize_t) component_image->colors; i++) { object[i].bounding_box.width-=(object[i].bounding_box.x-1); object[i].bounding_box.height-=(object[i].bounding_box.y-1); object[i].color.red=QuantumRange*(object[i].color.red/object[i].area); object[i].color.green=QuantumRange*(object[i].color.green/object[i].area); object[i].color.blue=QuantumRange*(object[i].color.blue/object[i].area); if (image->alpha_trait != UndefinedPixelTrait) object[i].color.alpha=QuantumRange*(object[i].color.alpha/object[i].area); if (image->colorspace == CMYKColorspace) object[i].color.black=QuantumRange*(object[i].color.black/object[i].area); object[i].centroid.x=object[i].centroid.x/object[i].area; object[i].centroid.y=object[i].centroid.y/object[i].area; } artifact=GetImageArtifact(image,"connected-components:area-threshold"); area_threshold=0.0; if (artifact != (const char *) NULL) area_threshold=StringToDouble(artifact,(char **) NULL); if (area_threshold > 0.0) { /* Merge object below area threshold. 
*/ component_view=AcquireAuthenticCacheView(component_image,exception); for (i=0; i < (ssize_t) component_image->colors; i++) { double census; RectangleInfo bounding_box; register ssize_t j; size_t id; if (status == MagickFalse) continue; if ((double) object[i].area >= area_threshold) continue; for (j=0; j < (ssize_t) component_image->colors; j++) object[j].census=0; bounding_box=object[i].bounding_box; for (y=0; y < (ssize_t) bounding_box.height+2; y++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x-1, bounding_box.y+y-1,bounding_box.width+2,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) bounding_box.width+2; x++) { j=(ssize_t) GetPixelIndex(component_image,p); if (j != i) object[j].census++; p+=GetPixelChannels(component_image); } } census=0; id=0; for (j=0; j < (ssize_t) component_image->colors; j++) if (census < object[j].census) { census=object[j].census; id=(size_t) j; } object[id].area+=object[i].area; for (y=0; y < (ssize_t) bounding_box.height; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) GetPixelIndex(component_image,q) == i) SetPixelIndex(component_image,(Quantum) id,q); q+=GetPixelChannels(component_image); } if (SyncCacheViewAuthenticPixels(component_view,exception) == MagickFalse) status=MagickFalse; } } component_view=DestroyCacheView(component_view); (void) SyncImage(component_image,exception); } artifact=GetImageArtifact(image,"connected-components:mean-color"); if (IsStringTrue(artifact) != MagickFalse) { /* Replace object with mean color. 
*/ for (i=0; i < (ssize_t) component_image->colors; i++) component_image->colormap[i]=object[i].color; } artifact=GetImageArtifact(image,"connected-components:keep"); if (artifact != (const char *) NULL) { /* Keep these object (make others transparent). */ for (i=0; i < (ssize_t) component_image->colors; i++) object[i].census=0; for (c=(char *) artifact; *c != '\0';) { while ((isspace((int) ((unsigned char) *c)) != 0) || (*c == ',')) c++; first=(ssize_t) strtol(c,&c,10); if (first < 0) first+=(ssize_t) component_image->colors; last=first; while (isspace((int) ((unsigned char) *c)) != 0) c++; if (*c == '-') { last=(ssize_t) strtol(c+1,&c,10); if (last < 0) last+=(ssize_t) component_image->colors; } step=(ssize_t) (first > last ? -1 : 1); for ( ; first != (last+step); first+=step) object[first].census++; } for (i=0; i < (ssize_t) component_image->colors; i++) { if (object[i].census != 0) continue; component_image->alpha_trait=BlendPixelTrait; component_image->colormap[i].alpha_trait=BlendPixelTrait; component_image->colormap[i].alpha=(MagickRealType) TransparentAlpha; } } artifact=GetImageArtifact(image,"connected-components:remove"); if (artifact != (const char *) NULL) { /* Remove these object (make them transparent). */ for (c=(char *) artifact; *c != '\0';) { while ((isspace((int) ((unsigned char) *c)) != 0) || (*c == ',')) c++; first=(ssize_t) strtol(c,&c,10); if (first < 0) first+=(ssize_t) component_image->colors; last=first; while (isspace((int) ((unsigned char) *c)) != 0) c++; if (*c == '-') { last=(ssize_t) strtol(c+1,&c,10); if (last < 0) last+=(ssize_t) component_image->colors; } step=(ssize_t) (first > last ? 
-1 : 1); for ( ; first != (last+step); first+=step) { component_image->alpha_trait=BlendPixelTrait; component_image->colormap[first].alpha_trait=BlendPixelTrait; component_image->colormap[first].alpha=(MagickRealType) TransparentAlpha; } } } (void) SyncImage(component_image,exception); artifact=GetImageArtifact(image,"connected-components:verbose"); if ((IsStringTrue(artifact) != MagickFalse) || (objects != (CCObjectInfo **) NULL)) { /* Report statistics on unique object. */ for (i=0; i < (ssize_t) component_image->colors; i++) { object[i].bounding_box.width=0; object[i].bounding_box.height=0; object[i].bounding_box.x=(ssize_t) component_image->columns; object[i].bounding_box.y=(ssize_t) component_image->rows; object[i].centroid.x=0; object[i].centroid.y=0; object[i].area=0; } component_view=AcquireVirtualCacheView(component_image,exception); for (y=0; y < (ssize_t) component_image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,0,y,component_image->columns, 1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) component_image->columns; x++) { size_t id; id=GetPixelIndex(component_image,p); if (x < object[id].bounding_box.x) object[id].bounding_box.x=x; if (x > (ssize_t) object[id].bounding_box.width) object[id].bounding_box.width=(size_t) x; if (y < object[id].bounding_box.y) object[id].bounding_box.y=y; if (y > (ssize_t) object[id].bounding_box.height) object[id].bounding_box.height=(size_t) y; object[id].centroid.x+=x; object[id].centroid.y+=y; object[id].area++; p+=GetPixelChannels(component_image); } } for (i=0; i < (ssize_t) component_image->colors; i++) { object[i].bounding_box.width-=(object[i].bounding_box.x-1); object[i].bounding_box.height-=(object[i].bounding_box.y-1); object[i].centroid.x=object[i].centroid.x/object[i].area; object[i].centroid.y=object[i].centroid.y/object[i].area; } 
component_view=DestroyCacheView(component_view); qsort((void *) object,component_image->colors,sizeof(*object), CCObjectInfoCompare); if (objects == (CCObjectInfo **) NULL) { (void) fprintf(stdout, "Objects (id: bounding-box centroid area mean-color):\n"); for (i=0; i < (ssize_t) component_image->colors; i++) { char mean_color[MagickPathExtent]; if (status == MagickFalse) break; if (object[i].area <= area_threshold) continue; GetColorTuple(&object[i].color,MagickFalse,mean_color); (void) fprintf(stdout, " %.20g: %.20gx%.20g%+.20g%+.20g %.1f,%.1f %.20g %s\n",(double) object[i].id,(double) object[i].bounding_box.width,(double) object[i].bounding_box.height,(double) object[i].bounding_box.x, (double) object[i].bounding_box.y,object[i].centroid.x, object[i].centroid.y,(double) object[i].area,mean_color); } } } if (objects == (CCObjectInfo **) NULL) object=(CCObjectInfo *) RelinquishMagickMemory(object); else *objects=object; return(component_image); }
profile.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP RRRR OOO FFFFF IIIII L EEEEE % % P P R R O O F I L E % % PPPP RRRR O O FFF I L EEE % % P R R O O F I L E % % P R R OOO F IIIII LLLLL EEEEE % % % % % % MagickCore Image Profile Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/configure.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/option-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#if defined(MAGICKCORE_LCMS_DELEGATE)
#if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H)
#include <wchar.h>
#include <lcms/lcms2.h>
#else
#include <wchar.h>
#include "lcms2.h"
#endif
#endif

/*
  Forward declarations
*/
static MagickBooleanType
  SetImageProfileInternal(Image *,const char *,const StringInfo *,
    const MagickBooleanType,ExceptionInfo *);

static void
  WriteTo8BimProfile(Image *,const char*,const StringInfo *);

/*
  Typedef declarations
*/
/*
  One named profile record: a name, its raw payload, and a validity
  signature.
*/
struct _ProfileInfo
{
  char
    *name;

  size_t
    length;

  unsigned char
    *info;

  size_t
    signature;
};

/*
  Pairs an image with its exception sink; CMSExceptionHandler (below) casts
  its LCMS context pointer to this type.
*/
typedef struct _CMSExceptionInfo
{
  Image
    *image;

  ExceptionInfo
    *exception;
} CMSExceptionInfo;

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e I m a g e P r o f i l e s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneImageProfiles() clones one or more image profiles.
%
%  The format of the CloneImageProfiles method is:
%
%      MagickBooleanType CloneImageProfiles(Image *image,
%        const Image *clone_image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageProfiles(Image *image,
  const Image *clone_image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clone_image != (const Image *) NULL);
  assert(clone_image->signature == MagickCoreSignature);
  if (clone_image->profiles != (void *) NULL)
    {
      /*
        Discard any existing profile map before cloning so the old splay
        tree is not leaked by the assignment below.
      */
      if (image->profiles != (void *) NULL)
        DestroyImageProfiles(image);
      image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles,
        (void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo);
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e l e t e I m a g e P r o f i l e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DeleteImageProfile() deletes a profile from the image by its name.
%
%  The format of the DeleteImageProfile method is:
%
%      MagickBooleanType DeleteImageProfile(Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
% */ MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return(MagickFalse); WriteTo8BimProfile(image,name,(StringInfo *) NULL); return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e P r o f i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImageProfiles() releases memory associated with an image profile map. % % The format of the DestroyProfiles method is: % % void DestroyImageProfiles(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void DestroyImageProfiles(Image *image) { if (image->profiles != (SplayTreeInfo *) NULL) image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageProfile() gets a profile associated with an image by name. % % The format of the GetImageProfile method is: % % const StringInfo *GetImageProfile(const Image *image,const char *name) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name. 
%
*/
MagickExport const StringInfo *GetImageProfile(const Image *image,
  const char *name)
{
  const StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  /* NULL is returned both when the map is absent and when the name misses. */
  profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,name);
  return(profile);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t N e x t I m a g e P r o f i l e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetNextImageProfile() gets the next profile name for an image.
%
%  The format of the GetNextImageProfile method is:
%
%    char *GetNextImageProfile(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport char *GetNextImageProfile(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((char *) NULL);
  /* Advances the splay tree's internal iterator; NULL marks exhaustion. */
  return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P r o f i l e I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
%  profile with / to / from an image.  If the profile is NULL, it is removed
%  from the image otherwise added or applied.  Use a name of '*' and a profile
%  of NULL to remove all profiles from the image.
%
%  ICC and ICM profiles are handled as follows: If the image does not have
%  an associated color profile, the one you provide is associated with the
%  image and the image pixels are not transformed.  Otherwise, the colorspace
%  transform defined by the existing and new profile are applied to the image
%  pixels and the new profile is associated with the image.
%
%  The format of the ProfileImage method is:
%
%      MagickBooleanType ProfileImage(Image *image,const char *name,
%        const void *datum,const size_t length,const MagickBooleanType clone)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: Name of profile to add or remove: ICC, IPTC, or generic profile.
%
%    o datum: the profile data.
%
%    o length: the length of the profile.
%
%    o clone: should be MagickFalse.
%
*/

#if defined(MAGICKCORE_LCMS_DELEGATE)
/*
  Release the per-thread scanline buffers allocated by
  AcquirePixelThreadSet(); safe on a partially-filled set because unused
  slots are NULL.
*/
static double **DestroyPixelThreadSet(double **pixels)
{
  register ssize_t
    i;

  assert(pixels != (double **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (pixels[i] != (double *) NULL)
      pixels[i]=(double *) RelinquishMagickMemory(pixels[i]);
  pixels=(double **) RelinquishMagickMemory(pixels);
  return(pixels);
}

/*
  Allocate one scanline buffer (columns x channels doubles) per worker
  thread; returns NULL on allocation failure after freeing any partial set.
*/
static double **AcquirePixelThreadSet(const size_t columns,
  const size_t channels)
{
  double
    **pixels;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(double **) AcquireQuantumMemory(number_threads,sizeof(*pixels));
  if (pixels == (double **) NULL)
    return((double **) NULL);
  /* zero-fill so DestroyPixelThreadSet can be called on a partial set */
  (void) memset(pixels,0,number_threads*sizeof(*pixels));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixels[i]=(double *) AcquireQuantumMemory(columns,channels*
      sizeof(**pixels));
    if (pixels[i] == (double *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}

/*
  Release the per-thread LCMS transforms created by
  AcquireTransformThreadSet().
*/
static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform)
{
  register ssize_t
    i;

  assert(transform != (cmsHTRANSFORM *) NULL);
  for (i=0; i < (ssize_t)
GetMagickResourceLimit(ThreadResource); i++)
    if (transform[i] != (cmsHTRANSFORM) NULL)
      cmsDeleteTransform(transform[i]);
  transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform);
  return(transform);
}

/*
  Create one LCMS color transform per worker thread (transforms are not
  thread-safe to share); returns NULL on failure after freeing any partial
  set.
*/
static cmsHTRANSFORM *AcquireTransformThreadSet(Image *image,
  const cmsHPROFILE source_profile,const cmsUInt32Number source_type,
  const cmsHPROFILE target_profile,const cmsUInt32Number target_type,
  const int intent,const cmsUInt32Number flags)
{
  cmsHTRANSFORM
    *transform;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads,
    sizeof(*transform));
  if (transform == (cmsHTRANSFORM *) NULL)
    return((cmsHTRANSFORM *) NULL);
  /* zero-fill so DestroyTransformThreadSet can be called on a partial set */
  (void) memset(transform,0,number_threads*sizeof(*transform));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    /*
      NOTE(review): the LCMS context passed here is the Image itself, yet
      CMSExceptionHandler below casts its context to CMSExceptionInfo * --
      confirm the error handler is registered with a matching context type.
    */
    transform[i]=cmsCreateTransformTHR((cmsContext) image,source_profile,
      source_type,target_profile,target_type,intent,flags);
    if (transform[i] == (cmsHTRANSFORM) NULL)
      return(DestroyTransformThreadSet(transform));
  }
  return(transform);
}
#endif

#if defined(MAGICKCORE_LCMS_DELEGATE)
/*
  LCMS error callback: log the message (when debugging) and convert it into
  a Magick ImageWarning exception.  Tolerates a NULL or incomplete context.
*/
static void CMSExceptionHandler(cmsContext context,cmsUInt32Number severity,
  const char *message)
{
  CMSExceptionInfo
    *cms_exception;

  ExceptionInfo
    *exception;

  Image
    *image;

  cms_exception=(CMSExceptionInfo *) context;
  if (cms_exception == (CMSExceptionInfo *) NULL)
    return;
  exception=cms_exception->exception;
  if (exception == (ExceptionInfo *) NULL)
    return;
  image=cms_exception->image;
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
        "UnableToTransformColorspace","`%s'","unknown context");
      return;
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s",
      severity,message != (char *) NULL ?
message : "no message"); (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning, "UnableToTransformColorspace","`%s'",image->filename); } #endif static MagickBooleanType SetsRGBImageProfile(Image *image, ExceptionInfo *exception) { static unsigned char sRGBProfile[] = { 0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00, 0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20, 0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a, 0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99, 0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67, 0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70, 0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88, 0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c, 0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24, 0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24, 0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14, 0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14, 0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14, 0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 
0x00, 0x00, 0x14, 0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14, 0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d, 0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57, 0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65, 0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e, 0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 
0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00, 0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c, 0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2, 0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d, 0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0, 0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87, 0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19, 0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37, 0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54, 0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72, 0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90, 0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae, 0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb, 0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb, 0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d, 0x01, 0x13, 0x01, 0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32, 
0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59, 0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83, 0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1, 0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1, 0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14, 0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b, 0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84, 0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1, 0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00, 0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43, 0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a, 0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3, 0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20, 0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71, 0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4, 0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c, 0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77, 0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5, 0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37, 0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d, 0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07, 0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74, 0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5, 0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a, 0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2, 0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f, 0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf, 0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 
0x3d, 0x0a, 0x54, 0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc, 0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69, 0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9, 0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e, 0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26, 0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3, 0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 0x49, 0x0e, 0x64, 0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09, 0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3, 0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61, 0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13, 0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9, 0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84, 0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43, 0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06, 0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce, 0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b, 0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c, 0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41, 0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b, 0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa, 0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd, 0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5, 0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2, 0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3, 0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99, 0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94, 0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 
0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94, 0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98, 0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1, 0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf, 0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2, 0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda, 0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7, 0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18, 0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f, 0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b, 0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b, 0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1, 0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c, 0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c, 0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91, 0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb, 0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a, 0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f, 0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8, 0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37, 0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c, 0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05, 0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74, 0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8, 0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61, 0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0, 0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64, 0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee, 0x42, 0x30, 0x42, 
0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d, 0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12, 0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab, 0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b, 0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0, 0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a, 0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a, 0x4d, 0x93, 0x4d, 0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00, 0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb, 0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c, 0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42, 0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f, 0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0, 0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8, 0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95, 0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78, 0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61, 0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f, 0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43, 0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d, 0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d, 0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43, 0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f, 0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60, 0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78, 0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95, 0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8, 0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1, 
0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11, 0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46, 0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81, 0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2, 0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a, 0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57, 0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab, 0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04, 0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64, 0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca, 0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36, 0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8, 0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20, 0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f, 0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24, 0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf, 0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40, 0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8, 0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76, 0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a, 0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4, 0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75, 0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d, 0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea, 0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae, 0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79, 0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a, 0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 
0xa7, 0xbc, 0x21, 0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff, 0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3, 0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce, 0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf, 0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7, 0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5, 0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 0x39, 0xd0, 0xba, 0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6, 0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8, 0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1, 0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10, 0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36, 0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63, 0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96, 0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0, 0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11, 0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58, 0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7, 0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb, 0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57, 0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba, 0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff }; StringInfo *profile; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (GetImageProfile(image,"icc") != (const StringInfo *) NULL) return(MagickFalse); profile=AcquireStringInfo(sizeof(sRGBProfile)); SetStringInfoDatum(profile,sRGBProfile); status=SetImageProfile(image,"icc",profile,exception); profile=DestroyStringInfo(profile); return(status); } 
MagickExport MagickBooleanType ProfileImage(Image *image,const char *name,
  const void *datum,const size_t length,ExceptionInfo *exception)
{
#define ProfileImageTag  "Profile/Image"
/*
  Close any open LCMS profile handles before throwing; used on every error
  path once source_profile/target_profile are in scope.
*/
#define ThrowProfileException(severity,tag,context) \
{ \
  if (source_profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(source_profile); \
  if (target_profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(target_profile); \
  ThrowBinaryException(severity,tag,context); \
}

  MagickBooleanType
    status;

  StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(name != (const char *) NULL);
  if ((datum == (const void *) NULL) || (length == 0))
    {
      char
        *next;

      /*
        Delete image profile(s): a NULL/empty datum means "remove every
        profile whose name matches the (possibly glob) name argument".
      */
      ResetImageProfileIterator(image);
      for (next=GetNextImageProfile(image); next != (const char *) NULL; )
      {
        if (IsOptionMember(next,name) != MagickFalse)
          {
            (void) DeleteImageProfile(image,next);
            /* restart iteration: deletion invalidates the iterator */
            ResetImageProfileIterator(image);
          }
        next=GetNextImageProfile(image);
      }
      return(MagickTrue);
    }
  /*
    Add a ICC, IPTC, or generic profile to the image.
  */
  status=MagickTrue;
  profile=AcquireStringInfo((size_t) length);
  SetStringInfoDatum(profile,(unsigned char *) datum);
  if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
    status=SetImageProfile(image,name,profile,exception);
  else
    {
      const StringInfo
        *icc_profile;

      icc_profile=GetImageProfile(image,"icc");
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          const char
            *value;

          /*
            Incoming profile is identical to the image's current ICC profile;
            fall back to EXIF hints to pick a concrete colorspace profile.
            NOTE(review): GetImageProperty may return NULL here and is passed
            straight to LocaleCompare — presumably LocaleCompare tolerates
            NULL; confirm against MagickCore/locale_.c.
          */
          value=GetImageProperty(image,"exif:ColorSpace",exception);
          (void) value;
          if (LocaleCompare(value,"1") != 0)
            (void) SetsRGBImageProfile(image,exception);
          value=GetImageProperty(image,"exif:InteroperabilityIndex",exception);
          if (LocaleCompare(value,"R98.") != 0)
            (void) SetsRGBImageProfile(image,exception);
          /*
            Future.

            value=GetImageProperty(image,"exif:InteroperabilityIndex",
              exception);
            if (LocaleCompare(value,"R03.") != 0)
              (void) SetAdobeRGB1998ImageProfile(image,exception);
          */
          icc_profile=GetImageProfile(image,"icc");
        }
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          /* profile unchanged after EXIF fixup: nothing to transform */
          profile=DestroyStringInfo(profile);
          return(MagickTrue);
        }
#if !defined(MAGICKCORE_LCMS_DELEGATE)
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (LCMS)",image->filename);
#else
      {
        cmsHPROFILE
          source_profile;

        CMSExceptionInfo
          cms_exception;

        /*
          Transform pixel colors as defined by the color profiles.
        */
        cmsSetLogErrorHandler(CMSExceptionHandler);
        cms_exception.image=image;
        cms_exception.exception=exception;
        (void) cms_exception;
        source_profile=cmsOpenProfileFromMemTHR((cmsContext) &cms_exception,
          GetStringInfoDatum(profile),(cmsUInt32Number)
          GetStringInfoLength(profile));
        if (source_profile == (cmsHPROFILE) NULL)
          ThrowBinaryException(ResourceLimitError,
            "ColorspaceColorProfileMismatch",name);
        if ((cmsGetDeviceClass(source_profile) != cmsSigLinkClass) &&
            (icc_profile == (StringInfo *) NULL))
          /* no existing ICC profile: just attach the new one, no transform */
          status=SetImageProfile(image,name,profile,exception);
        else
          {
            CacheView
              *image_view;

            ColorspaceType
              source_colorspace,
              target_colorspace;

            cmsColorSpaceSignature
              signature;

            cmsHPROFILE
              target_profile;

            cmsHTRANSFORM
              *magick_restrict transform;

            cmsUInt32Number
              flags,
              source_type,
              target_type;

            double
              **magick_restrict source_pixels,
              source_scale,
              **magick_restrict target_pixels,
              target_scale;

            int
              intent;

            MagickOffsetType
              progress;

            size_t
              source_channels,
              target_channels;

            ssize_t
              y;

            target_profile=(cmsHPROFILE) NULL;
            if (icc_profile != (StringInfo *) NULL)
              {
                /*
                  Image already carries an ICC profile: transform FROM the
                  existing profile TO the newly supplied one.
                */
                target_profile=source_profile;
                source_profile=cmsOpenProfileFromMemTHR((cmsContext)
                  &cms_exception,GetStringInfoDatum(icc_profile),
                  (cmsUInt32Number) GetStringInfoLength(icc_profile));
                if (source_profile == (cmsHPROFILE) NULL)
                  ThrowProfileException(ResourceLimitError,
                    "ColorspaceColorProfileMismatch",name);
              }
            /*
              Map the LCMS source colorspace signature to ImageMagick's
              colorspace, pixel layout (TYPE_*_DBL) and channel count.
              source_scale converts QuantumScale-normalized [0,1] samples to
              the range LCMS expects (e.g. 0..100 for CMYK% and Lab L*).
            */
            source_scale=1.0;
            source_channels=3;
            switch (cmsGetColorSpace(source_profile))
            {
              case cmsSigCmykData:
              {
                source_colorspace=CMYKColorspace;
                source_type=(cmsUInt32Number) TYPE_CMYK_DBL;
                source_channels=4;
                source_scale=100.0;
                break;
              }
              case cmsSigGrayData:
              {
                source_colorspace=GRAYColorspace;
                source_type=(cmsUInt32Number) TYPE_GRAY_DBL;
                source_channels=1;
                break;
              }
              case cmsSigLabData:
              {
                source_colorspace=LabColorspace;
                source_type=(cmsUInt32Number) TYPE_Lab_DBL;
                source_scale=100.0;
                break;
                break;  /* NOTE(review): duplicated break, unreachable */
              }
              case cmsSigRgbData:
              {
                source_colorspace=sRGBColorspace;
                source_type=(cmsUInt32Number) TYPE_RGB_DBL;
                break;
              }
              case cmsSigXYZData:
              {
                source_colorspace=XYZColorspace;
                source_type=(cmsUInt32Number) TYPE_XYZ_DBL;
                break;
              }
              default:
              {
                source_colorspace=UndefinedColorspace;
                source_type=(cmsUInt32Number) TYPE_RGB_DBL;
                break;
              }
            }
            /*
              Target signature: the source profile's PCS for a device-link
              style transform, or the target profile's colorspace when one
              was supplied.  target_scale maps LCMS output back to [0,1].
            */
            signature=cmsGetPCS(source_profile);
            if (target_profile != (cmsHPROFILE) NULL)
              signature=cmsGetColorSpace(target_profile);
            target_scale=1.0;
            target_channels=3;
            switch (signature)
            {
              case cmsSigCmykData:
              {
                target_colorspace=CMYKColorspace;
                target_type=(cmsUInt32Number) TYPE_CMYK_DBL;
                target_channels=4;
                target_scale=0.01;
                break;
              }
              case cmsSigLabData:
              {
                target_colorspace=LabColorspace;
                target_type=(cmsUInt32Number) TYPE_Lab_DBL;
                target_scale=0.01;
                break;
              }
              case cmsSigGrayData:
              {
                target_colorspace=GRAYColorspace;
                target_type=(cmsUInt32Number) TYPE_GRAY_DBL;
                target_channels=1;
                break;
              }
              case cmsSigRgbData:
              {
                target_colorspace=sRGBColorspace;
                target_type=(cmsUInt32Number) TYPE_RGB_DBL;
                break;
              }
              case cmsSigXYZData:
              {
                target_colorspace=XYZColorspace;
                target_type=(cmsUInt32Number) TYPE_XYZ_DBL;
                break;
              }
              default:
              {
                target_colorspace=UndefinedColorspace;
                target_type=(cmsUInt32Number) TYPE_RGB_DBL;
                break;
              }
            }
            if ((source_colorspace == UndefinedColorspace) ||
                (target_colorspace == UndefinedColorspace))
              ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
                name);
            /* Map ImageMagick rendering intent onto the LCMS INTENT_* enum. */
            switch (image->rendering_intent)
            {
              case AbsoluteIntent: intent=INTENT_ABSOLUTE_COLORIMETRIC;
                break;
              case PerceptualIntent: intent=INTENT_PERCEPTUAL;
                break;
              case RelativeIntent: intent=INTENT_RELATIVE_COLORIMETRIC;
                break;
              case SaturationIntent: intent=INTENT_SATURATION;
                break;
              default: intent=INTENT_PERCEPTUAL;
                break;
            }
            flags=cmsFLAGS_HIGHRESPRECALC;
#if defined(cmsFLAGS_BLACKPOINTCOMPENSATION)
            if (image->black_point_compensation != MagickFalse)
              flags|=cmsFLAGS_BLACKPOINTCOMPENSATION;
#endif
            /* one cmsHTRANSFORM per OpenMP thread (LCMS transforms are not
               shareable across threads without locking) */
            transform=AcquireTransformThreadSet(image,source_profile,
              source_type,target_profile,target_type,intent,flags);
            if (transform == (cmsHTRANSFORM *) NULL)
              ThrowProfileException(ImageError,"UnableToCreateColorTransform",
                name);
            /*
              Transform image as dictated by the source & target image
              profiles.
            */
            source_pixels=AcquirePixelThreadSet(image->columns,
              source_channels);
            target_pixels=AcquirePixelThreadSet(image->columns,
              target_channels);
            if ((source_pixels == (double **) NULL) ||
                (target_pixels == (double **) NULL))
              {
                transform=DestroyTransformThreadSet(transform);
                ThrowProfileException(ResourceLimitError,
                  "MemoryAllocationFailed",image->filename);
              }
            if (SetImageStorageClass(image,DirectClass,exception) ==
                MagickFalse)
              {
                target_pixels=DestroyPixelThreadSet(target_pixels);
                source_pixels=DestroyPixelThreadSet(source_pixels);
                transform=DestroyTransformThreadSet(transform);
                if (source_profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(source_profile);
                if (target_profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(target_profile);
                return(MagickFalse);
              }
            if (target_colorspace == CMYKColorspace)
              /* switch colorspace before the loop so the pixel cache carries
                 a black channel for the CMYK samples written below */
              (void) SetImageColorspace(image,target_colorspace,exception);
            progress=0;
            image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp parallel for schedule(static) shared(status) \
              magick_number_threads(image,image,image->rows,1)
#endif
            for (y=0; y < (ssize_t) image->rows; y++)
            {
              const int
                id = GetOpenMPThreadId();

              MagickBooleanType
                sync;

              register double
                *p;

              register Quantum
                *magick_restrict q;

              register ssize_t
                x;

              if (status == MagickFalse)
                continue;
              q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
                exception);
              if (q == (Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              /* gather: quantum pixels -> per-thread double scanline */
              p=source_pixels[id];
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                *p++=source_scale*QuantumScale*GetPixelRed(image,q);
                if (source_channels > 1)
                  {
                    *p++=source_scale*QuantumScale*GetPixelGreen(image,q);
                    *p++=source_scale*QuantumScale*GetPixelBlue(image,q);
                  }
                if (source_channels > 3)
                  *p++=source_scale*QuantumScale*GetPixelBlack(image,q);
                q+=GetPixelChannels(image);
              }
              cmsDoTransform(transform[id],source_pixels[id],target_pixels[id],
                (unsigned int) image->columns);
              /* scatter: transformed doubles -> quantum pixels (rewind q) */
              p=target_pixels[id];
              q-=GetPixelChannels(image)*image->columns;
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                if (target_channels == 1)
                  SetPixelGray(image,ClampToQuantum(target_scale*
                    QuantumRange*(*p)),q);
                else
                  SetPixelRed(image,ClampToQuantum(target_scale*
                    QuantumRange*(*p)),q);
                p++;
                if (target_channels > 1)
                  {
                    SetPixelGreen(image,ClampToQuantum(target_scale*
                      QuantumRange*(*p)),q);
                    p++;
                    SetPixelBlue(image,ClampToQuantum(target_scale*
                      QuantumRange*(*p)),q);
                    p++;
                  }
                if (target_channels > 3)
                  {
                    SetPixelBlack(image,ClampToQuantum(target_scale*
                      QuantumRange*(*p)),q);
                    p++;
                  }
                q+=GetPixelChannels(image);
              }
              sync=SyncCacheViewAuthenticPixels(image_view,exception);
              if (sync == MagickFalse)
                status=MagickFalse;
              if (image->progress_monitor != (MagickProgressMonitor) NULL)
                {
                  MagickBooleanType
                    proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
                  #pragma omp critical (MagickCore_ProfileImage)
#endif
                  proceed=SetImageProgress(image,ProfileImageTag,progress++,
                    image->rows);
                  if (proceed == MagickFalse)
                    status=MagickFalse;
                }
            }
            image_view=DestroyCacheView(image_view);
            (void) SetImageColorspace(image,target_colorspace,exception);
            /* update image->type to match the new channel layout */
            switch (signature)
            {
              case cmsSigRgbData:
              {
                image->type=image->alpha_trait == UndefinedPixelTrait ?
                  TrueColorType : TrueColorAlphaType;
                break;
              }
              case cmsSigCmykData:
              {
                image->type=image->alpha_trait == UndefinedPixelTrait ?
                  ColorSeparationType : ColorSeparationAlphaType;
                break;
              }
              case cmsSigGrayData:
              {
                image->type=image->alpha_trait == UndefinedPixelTrait ?
                  GrayscaleType : GrayscaleAlphaType;
                break;
              }
              default:
                break;
            }
            target_pixels=DestroyPixelThreadSet(target_pixels);
            source_pixels=DestroyPixelThreadSet(source_pixels);
            transform=DestroyTransformThreadSet(transform);
            if ((status != MagickFalse) &&
                (cmsGetDeviceClass(source_profile) != cmsSigLinkClass))
              status=SetImageProfile(image,name,profile,exception);
            if (target_profile != (cmsHPROFILE) NULL)
              (void) cmsCloseProfile(target_profile);
          }
        (void) cmsCloseProfile(source_profile);
      }
#endif
    }
  profile=DestroyStringInfo(profile);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e m o v e I m a g e P r o f i l e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RemoveImageProfile() removes a named profile from the image and returns its
%  value.
%
%  The format of the RemoveImageProfile method is:
%
%      void *RemoveImageProfile(Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
  StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  /* keep the 8BIM container in sync: a NULL profile deletes the matching
     8BIM resource entry for this name */
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  /* detach the node from the splay tree; ownership passes to the caller */
  profile=(StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *)
    image->profiles,name);
  return(profile);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s e t P r o f i l e I t e r a t o r                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetImageProfileIterator() resets the image profile iterator.  Use it in
%  conjunction with GetNextImageProfile() to iterate over all the profiles
%  associated with an image.
%
%  The format of the ResetImageProfileIterator method is:
%
%      ResetImageProfileIterator(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void ResetImageProfileIterator(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return;
  ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e P r o f i l e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageProfile() adds a named profile to the image.  If a profile with the
%  same name already exists, it is replaced.  This method differs from the
%  ProfileImage() method in that it does not apply CMS color profiles.
%
%  The format of the SetImageProfile method is:
%
%      MagickBooleanType SetImageProfile(Image *image,const char *name,
%        const StringInfo *profile)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name, for example icc, exif, and 8bim (8bim is the
%      Photoshop wrapper for iptc profiles).
%
%    o profile: A StringInfo structure that contains the named profile.
%
*/

/* Splay-tree value destructor for image->profiles. */
static void *DestroyProfile(void *profile)
{
  return((void *) DestroyStringInfo((StringInfo *) profile));
}

/* Read one byte from a big-endian 8BIM resource stream; advances and
   returns the cursor. */
static inline const unsigned char *ReadResourceByte(const unsigned char *p,
  unsigned char *quantum)
{
  *quantum=(*p++);
  return(p);
}

/* Read a 32-bit big-endian value; advances and returns the cursor. */
static inline const unsigned char *ReadResourceLong(const unsigned char *p,
  unsigned int *quantum)
{
  *quantum=(unsigned int) (*p++) << 24;
  *quantum|=(unsigned int) (*p++) << 16;
  *quantum|=(unsigned int) (*p++) << 8;
  *quantum|=(unsigned int) (*p++);
  return(p);
}

/* Read a 16-bit big-endian value; advances and returns the cursor. */
static inline const unsigned char *ReadResourceShort(const unsigned char *p,
  unsigned short *quantum)
{
  *quantum=(unsigned short) (*p++) << 8;
  *quantum|=(unsigned short) (*p++);
  return(p);
}

/* Write a 32-bit big-endian value at p (does not advance the cursor). */
static inline void WriteResourceLong(unsigned char *p,
  const unsigned int quantum)
{
  unsigned char
    buffer[4];

  buffer[0]=(unsigned char) (quantum >> 24);
  buffer[1]=(unsigned char) (quantum >> 16);
  buffer[2]=(unsigned char) (quantum >> 8);
  buffer[3]=(unsigned char) quantum;
  (void) memcpy(p,buffer,4);
}

/*
  Mirror a named profile (icc/iptc/xmp) into the image's Photoshop "8bim"
  resource block: replaces the matching resource entry with the new profile
  payload, or removes it when profile is NULL.  Other names are ignored.
*/
static void WriteTo8BimProfile(Image *image,const char *name,
  const StringInfo *profile)
{
  const unsigned char
    *datum,
    *q;

  register const unsigned char
    *p;

  size_t
    length;

  StringInfo
    *profile_8bim;

  ssize_t
    count;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id,
    profile_id;

  /* map the profile name to its Photoshop resource id */
  if (LocaleCompare(name,"icc") == 0)
    profile_id=0x040f;
  else
    if (LocaleCompare(name,"iptc") == 0)
      profile_id=0x0404;
    else
      if (LocaleCompare(name,"xmp") == 0)
        profile_id=0x0424;
      else
        return;
  profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,"8bim");
  if (profile_8bim == (StringInfo *) NULL)
    return;
  datum=GetStringInfoDatum(profile_8bim);
  length=GetStringInfoLength(profile_8bim);
  /* walk "8BIM" resource entries: id, pascal-string name (padded to even),
     32-bit payload length (rounded to even), payload */
  for (p=datum; p < (datum+length-16); )
  {
    q=p;
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
    count=(ssize_t) value;
    if ((count & 0x01) != 0)
      count++;
    if ((count < 0) || (p > (datum+length-count)) ||
        (count > (ssize_t) length))
      break;
    if (id != profile_id)
      p+=count;
    else
      {
        size_t
          extent,
          offset;

        ssize_t
          extract_extent;

        StringInfo
          *extract_profile;

        extract_extent=0;
        extent=(datum+length)-(p+count);
        if (profile == (StringInfo *) NULL)
          {
            /* delete: copy everything before this entry, then the tail */
            offset=(q-datum);
            extract_profile=AcquireStringInfo(offset+extent);
            (void) memcpy(extract_profile->datum,datum,offset);
          }
        else
          {
            /* replace: keep header, rewrite the 4-byte payload length,
               splice in the new (even-padded) payload */
            offset=(p-datum);
            extract_extent=profile->length;
            if ((extract_extent & 0x01) != 0)
              extract_extent++;
            extract_profile=AcquireStringInfo(offset+extract_extent+extent);
            (void) memcpy(extract_profile->datum,datum,offset-4);
            WriteResourceLong(extract_profile->datum+offset-4,(unsigned int)
              profile->length);
            (void) memcpy(extract_profile->datum+offset,
              profile->datum,profile->length);
          }
        (void) memcpy(extract_profile->datum+offset+extract_extent,
          p+count,extent);
        (void) AddValueToSplayTree((SplayTreeInfo *) image->profiles,
          ConstantString("8bim"),CloneStringInfo(extract_profile));
        extract_profile=DestroyStringInfo(extract_profile);
        break;
      }
  }
}

/*
  Parse a Photoshop 8BIM resource block and extract the resources ImageMagick
  understands: resolution (0x03ed), plus embedded iptc/icc/exif/xmp profiles,
  which are re-registered as named image profiles.
*/
static void GetProfilesFromResourceBlock(Image *image,
  const StringInfo *resource_block,ExceptionInfo *exception)
{
  const unsigned char
    *datum;

  register const unsigned char
    *p;

  size_t
    length;

  ssize_t
    count;

  StringInfo
    *profile;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id;

  datum=GetStringInfoDatum(resource_block);
  length=GetStringInfoLength(resource_block);
  for (p=datum; p < (datum+length-16); )
  {
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
    count=(ssize_t) value;
    if ((p > (datum+length-count)) || (count > (ssize_t) length) ||
        (count < 0))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        unsigned int
          resolution;

        unsigned short
          units;

        /*
          Resolution: 16.16 fixed-point x/y densities plus a units field.
        */
        if (count < 10)
          break;
        p=ReadResourceLong(p,&resolution);
        image->resolution.x=((double) resolution)/65536.0;
        p=ReadResourceShort(p,&units)+2;
        p=ReadResourceLong(p,&resolution)+4;
        image->resolution.y=((double) resolution)/65536.0;
        /*
          Values are always stored as pixels per inch.
        */
        if ((ResolutionType) units != PixelsPerCentimeterResolution)
          image->units=PixelsPerInchResolution;
        else
          {
            image->units=PixelsPerCentimeterResolution;
            image->resolution.x/=2.54;
            image->resolution.y/=2.54;
          }
        break;
      }
      case 0x0404:
      {
        /*
          IPTC Profile
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        /* recursive=MagickTrue prevents WriteTo8BimProfile from rewriting
           the 8bim block we are currently parsing */
        (void) SetImageProfileInternal(image,"iptc",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x040c:
      {
        /*
          Thumbnail.
        */
        p+=count;
        break;
      }
      case 0x040f:
      {
        /*
          ICC Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"icc",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0422:
      {
        /*
          EXIF Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"exif",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0424:
      {
        /*
          XMP Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"xmp",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      default:
      {
        p+=count;
        break;
      }
    }
    /* payloads are padded to an even byte count */
    if ((count & 0x01) != 0)
      p++;
  }
}

/*
  Store a named profile on the image.  When the name is "8bim" the resource
  block is parsed for embedded sub-profiles; otherwise (unless called
  recursively from that parse) the profile is mirrored back into the 8bim
  container.  Returns the splay-tree insertion status.
*/
static MagickBooleanType SetImageProfileInternal(Image *image,const char *name,
  const StringInfo *profile,const MagickBooleanType recursive,
  ExceptionInfo *exception)
{
  char
    key[MagickPathExtent],
    property[MagickPathExtent];

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
      DestroyProfile);
  /* profile names are stored lower-cased */
  (void) CopyMagickString(key,name,MagickPathExtent);
  LocaleLower(key);
  status=AddValueToSplayTree((SplayTreeInfo *) image->profiles,
    ConstantString(key),CloneStringInfo(profile));
  if (status != MagickFalse)
    {
      if (LocaleCompare(name,"8bim") == 0)
        GetProfilesFromResourceBlock(image,profile,exception);
      else
        if (recursive == MagickFalse)
          WriteTo8BimProfile(image,name,profile);
    }
  /*
    Inject profile into image properties.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%s:*",name);
  (void) GetImageProperty(image,property,exception);
  return(status);
}

MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name,
  const StringInfo *profile,ExceptionInfo *exception)
{
  /* public entry point: non-recursive, so 8bim mirroring is enabled */
  return(SetImageProfileInternal(image,name,profile,MagickFalse,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c I m a g e P r o f i l e s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImageProfiles() synchronizes image properties with the image profiles.
%  Currently we only support updating the EXIF resolution and orientation.
%
%  The format of the SyncImageProfiles method is:
%
%      MagickBooleanType SyncImageProfiles(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/

/* Consume one byte from the stream; returns EOF when exhausted. */
static inline int ReadProfileByte(unsigned char **p,size_t *length)
{
  int
    c;

  if (*length < 1)
    return(EOF);
  c=(int) (*(*p)++);
  (*length)--;
  return(c);
}

/* Read a 16-bit value from buffer honoring the requested endianness.
   The unsigned->signed union round-trip avoids implementation-defined
   conversion of out-of-range values. */
static inline signed short ReadProfileShort(const EndianType endian,
  unsigned char *buffer)
{
  union
  {
    unsigned int
      unsigned_value;

    signed int
      signed_value;
  } quantum;

  unsigned short
    value;

  if (endian == LSBEndian)
    {
      value=(unsigned short) buffer[1] << 8;
      value|=(unsigned short) buffer[0];
      quantum.unsigned_value=value & 0xffff;
      return(quantum.signed_value);
    }
  value=(unsigned short) buffer[0] << 8;
  value|=(unsigned short) buffer[1];
  quantum.unsigned_value=value & 0xffff;
  return(quantum.signed_value);
}

/* Read a 32-bit value from buffer honoring the requested endianness. */
static inline signed int ReadProfileLong(const EndianType endian,
  unsigned char *buffer)
{
  union
  {
    unsigned int
      unsigned_value;

    signed int
      signed_value;
  } quantum;

  unsigned int
    value;

  if (endian == LSBEndian)
    {
      value=(unsigned int) buffer[3] << 24;
      value|=(unsigned int) buffer[2] << 16;
      value|=(unsigned int) buffer[1] << 8;
      value|=(unsigned int) buffer[0];
      quantum.unsigned_value=value & 0xffffffff;
      return(quantum.signed_value);
    }
  value=(unsigned int) buffer[0] << 24;
  value|=(unsigned int) buffer[1] << 16;
  value|=(unsigned int) buffer[2] << 8;
  value|=(unsigned int) buffer[3];
  quantum.unsigned_value=value & 0xffffffff;
  return(quantum.signed_value);
}

/* Read a big-endian 32-bit value, advancing the cursor; 0 on underflow. */
static inline signed int ReadProfileMSBLong(unsigned char **p,size_t *length)
{
  signed int
    value;

  if (*length < 4)
    return(0);
  value=ReadProfileLong(MSBEndian,*p);
  (*length)-=4;
  *p+=4;
  return(value);
}

/* Read a big-endian 16-bit value, advancing the cursor; 0 on underflow. */
static inline signed short ReadProfileMSBShort(unsigned char **p,
  size_t *length)
{
  signed short
    value;

  if (*length < 2)
    return(0);
  value=ReadProfileShort(MSBEndian,*p);
  (*length)-=2;
  *p+=2;
  return(value);
}

/* Write a 32-bit value at p in the requested endianness. */
static inline void WriteProfileLong(const EndianType endian,
  const size_t value,unsigned char *p)
{
  unsigned char
    buffer[4];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
      buffer[2]=(unsigned char) (value >> 16);
      buffer[3]=(unsigned char) (value >> 24);
      (void) memcpy(p,buffer,4);
      return;
    }
  buffer[0]=(unsigned char) (value >> 24);
  buffer[1]=(unsigned char) (value >> 16);
  buffer[2]=(unsigned char) (value >> 8);
  buffer[3]=(unsigned char) value;
  (void) memcpy(p,buffer,4);
}

/* Write a 16-bit value at p in the requested endianness. */
static void WriteProfileShort(const EndianType endian,
  const unsigned short value,unsigned char *p)
{
  unsigned char
    buffer[2];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
      (void) memcpy(p,buffer,2);
      return;
    }
  buffer[0]=(unsigned char) (value >> 8);
  buffer[1]=(unsigned char) value;
  (void) memcpy(p,buffer,2);
}

/*
  Rewrite the resolution resource (id 0x03ED) inside an 8BIM profile so it
  matches image->resolution/image->units.  Scans for "8BIM" signatures and
  patches the 16-byte fixed-point x/y density fields in place.
*/
static MagickBooleanType Sync8BimProfile(Image *image,StringInfo *profile)
{
  size_t
    length;

  ssize_t
    count;

  unsigned char
    *p;

  unsigned short
    id;

  length=GetStringInfoLength(profile);
  p=GetStringInfoDatum(profile);
  while (length != 0)
  {
    /* resync on the literal "8BIM" signature, one byte at a time */
    if (ReadProfileByte(&p,&length) != 0x38)
      continue;
    if (ReadProfileByte(&p,&length) != 0x42)
      continue;
    if (ReadProfileByte(&p,&length) != 0x49)
      continue;
    if (ReadProfileByte(&p,&length) != 0x4D)
      continue;
    if (length < 7)
      return(MagickFalse);
    id=ReadProfileMSBShort(&p,&length);
    count=(ssize_t) ReadProfileByte(&p,&length);
    if ((count >= (ssize_t) length) || (count < 0))
      return(MagickFalse);
    /* skip the pascal-string resource name, padded to an even length */
    p+=count;
    length-=count;
    if ((*p & 0x01) == 0)
      (void) ReadProfileByte(&p,&length);
    count=(ssize_t) ReadProfileMSBLong(&p,&length);
    if ((count > (ssize_t) length) || (count < 0))
      return(MagickFalse);
    if ((id == 0x3ED) && (count == 16))
      {
        /* densities are 16.16 fixed point, always stored per inch */
        if (image->units == PixelsPerCentimeterResolution)
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x*2.54*
            65536.0),p);
        else
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x*
            65536.0),p);
        WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4);
        if (image->units == PixelsPerCentimeterResolution)
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y*2.54*
            65536.0),p+8);
        else
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y*
            65536.0),p+8);
        WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12);
      }
    p+=count;
    length-=count;
  }
  return(MagickTrue);
}

/*
  Rewrite the EXIF profile in place so the resolution (0x011a/0x011b),
  orientation (0x0112) and resolution-unit (0x0128) tags match the image
  struct.  Walks the TIFF IFD chain (including ExifIFD/Interop sub-IFDs via
  an explicit stack); a splay tree of visited entries guards against loops
  in corrupt profiles.
*/
MagickBooleanType SyncExifProfile(Image *image,StringInfo *profile)
{
#define MaxDirectoryStack  16
#define EXIF_DELIMITER  "\n"
#define EXIF_NUM_FORMATS  12
#define TAG_EXIF_OFFSET  0x8769
#define TAG_INTEROP_OFFSET  0xa005

  typedef struct _DirectoryInfo
  {
    unsigned char
      *directory;

    size_t
      entry;
  } DirectoryInfo;

  DirectoryInfo
    directory_stack[MaxDirectoryStack];

  EndianType
    endian;

  size_t
    entry,
    length,
    number_entries;

  SplayTreeInfo
    *exif_resources;

  ssize_t
    id,
    level,
    offset;

  /* bytes per element for each TIFF data format code (index 0 unused) */
  static int
    format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};

  unsigned char
    *directory,
    *exif;

  /*
    Set EXIF resolution tag.
  */
  length=GetStringInfoLength(profile);
  exif=GetStringInfoDatum(profile);
  if (length < 16)
    return(MagickFalse);
  id=(ssize_t) ReadProfileShort(LSBEndian,exif);
  if ((id != 0x4949) && (id != 0x4D4D))
    {
      /* no TIFF header at the start: skip past an "Exif\0\0" preamble */
      while (length != 0)
      {
        if (ReadProfileByte(&exif,&length) != 0x45)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x78)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x69)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x66)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x00)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x00)
          continue;
        break;
      }
      if (length < 16)
        return(MagickFalse);
      id=(ssize_t) ReadProfileShort(LSBEndian,exif);
    }
  endian=LSBEndian;
  if (id == 0x4949)
    endian=LSBEndian;  /* "II": Intel, little-endian */
  else
    if (id == 0x4D4D)
      endian=MSBEndian;  /* "MM": Motorola, big-endian */
    else
      return(MagickFalse);
  if (ReadProfileShort(endian,exif+2) != 0x002a)
    return(MagickFalse);
  /*
    This the offset to the first IFD.
  */
  offset=(ssize_t) ReadProfileLong(endian,exif+4);
  if ((offset < 0) || ((size_t) offset >= length))
    return(MagickFalse);
  directory=exif+offset;
  level=0;
  entry=0;
  exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
    (void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
  do
  {
    if (level > 0)
      {
        /* resume a previously pushed IFD */
        level--;
        directory=directory_stack[level].directory;
        entry=directory_stack[level].entry;
      }
    if ((directory < exif) || (directory > (exif+length-2)))
      break;
    /*
      Determine how many entries there are in the current IFD.
    */
    number_entries=ReadProfileShort(endian,directory);
    for ( ; entry < number_entries; entry++)
    {
      int
        components;

      register unsigned char
        *p,
        *q;

      size_t
        number_bytes;

      ssize_t
        format,
        tag_value;

      q=(unsigned char *) (directory+2+(12*entry));
      if (q > (exif+length-12))
        break;  /* corrupt EXIF */
      if (GetValueFromSplayTree(exif_resources,q) == q)
        break;  /* entry already visited: cycle in a corrupt profile */
      (void) AddValueToSplayTree(exif_resources,q,q);
      tag_value=(ssize_t) ReadProfileShort(endian,q);
      format=(ssize_t) ReadProfileShort(endian,q+2);
      if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS))
        break;
      components=(int) ReadProfileLong(endian,q+4);
      if (components < 0)
        break;  /* corrupt EXIF */
      number_bytes=(size_t) components*format_bytes[format];
      if ((ssize_t) number_bytes < components)
        break;  /* prevent overflow */
      if (number_bytes <= 4)
        p=q+8;  /* value is stored inline in the entry */
      else
        {
          /*
            The directory entry contains an offset.
          */
          offset=(ssize_t) ReadProfileLong(endian,q+8);
          if ((offset < 0) || ((size_t) (offset+number_bytes) > length))
            continue;
          if (~length < number_bytes)
            continue;  /* prevent overflow */
          p=(unsigned char *) (exif+offset);
        }
      switch (tag_value)
      {
        case 0x011a:
        {
          /* XResolution: rational numerator/denominator */
          (void) WriteProfileLong(endian,(size_t) (image->resolution.x+0.5),p);
          (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x011b:
        {
          /* YResolution */
          (void) WriteProfileLong(endian,(size_t) (image->resolution.y+0.5),p);
          (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x0112:
        {
          /* Orientation (short, or long in some writers) */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) image->orientation,p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) image->orientation,
            p);
          break;
        }
        case 0x0128:
        {
          /* ResolutionUnit: EXIF value is ImageMagick units + 1 */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) (image->units+1),p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) (image->units+1),p);
          break;
        }
        default:
          break;
      }
      if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET))
        {
          /* push the current IFD and descend into the sub-IFD */
          offset=(ssize_t) ReadProfileLong(endian,p);
          if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
            {
              directory_stack[level].directory=directory;
              entry++;
              directory_stack[level].entry=entry;
              level++;
              directory_stack[level].directory=exif+offset;
              directory_stack[level].entry=0;
              level++;
              if ((directory+2+(12*number_entries)) > (exif+length))
                break;
              /* also queue the next chained IFD, if any */
              offset=(ssize_t) ReadProfileLong(endian,directory+2+(12*
                number_entries));
              if ((offset != 0) && ((size_t) offset < length) &&
                  (level < (MaxDirectoryStack-2)))
                {
                  directory_stack[level].directory=exif+offset;
                  directory_stack[level].entry=0;
                  level++;
                }
            }
          break;
        }
    }
  } while (level > 0);
  exif_resources=DestroySplayTree(exif_resources);
  return(MagickTrue);
}

MagickPrivate MagickBooleanType SyncImageProfiles(Image *image)
{
  MagickBooleanType
    status;

  StringInfo
    *profile;

  status=MagickTrue;
  profile=(StringInfo *) GetImageProfile(image,"8BIM");
  if (profile != (StringInfo *) NULL)
    if (Sync8BimProfile(image,profile) == MagickFalse)
      status=MagickFalse;
profile=(StringInfo *) GetImageProfile(image,"EXIF"); if (profile != (StringInfo *) NULL) if (SyncExifProfile(image,profile) == MagickFalse) status=MagickFalse; return(status); }
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/ASTConcept.h" #include "clang/AST/ASTFwd.h" #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprConcepts.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExprOpenMP.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TypeLoc.h" #include "clang/APINotes/APINotesManager.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/Builtins.h" #include "clang/Basic/DarwinSDKInfo.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenCLOptions.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include 
"clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/SemaConcept.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include <deque> #include <functional> #include <memory> #include <string> #include <tuple> #include <vector> namespace llvm { class APSInt; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class 
FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; enum class OverloadCandidateParamOrder : char; enum OverloadCandidateRewriteKind : unsigned; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class 
VarTemplateSpecializationDecl;
  class VisibilityAttr;
  class VisibleDeclConsumer;
  class IndirectFieldDecl;
  struct DeductionFailureInfo;
  class TemplateSpecCandidateSet;

namespace sema {
  class AccessedEntity;
  class BlockScopeInfo;
  class Capture;
  class CapturedRegionScopeInfo;
  class CapturingScopeInfo;
  class CompoundScopeInfo;
  class DelayedDiagnostic;
  class DelayedDiagnosticPool;
  class FunctionScopeInfo;
  class LambdaScopeInfo;
  class PossiblyUnreachableDiag;
  class SemaPPCallbacks;
  class TemplateDeductionInfo;
}

namespace threadSafety {
  class BeforeSet;
  void threadSafetyCleanup(BeforeSet* Cache);
}

// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
                  SourceLocation> UnexpandedParameterPack;

/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
  /// The first pointer declarator (of any pointer kind) in the file that does
  /// not have a corresponding nullability annotation.
  SourceLocation PointerLoc;

  /// The end location for the first pointer declarator in the file. Used for
  /// placing fix-its.
  SourceLocation PointerEndLoc;

  /// Which kind of pointer declarator we saw.
  uint8_t PointerKind;

  /// Whether we saw any type nullability annotations in the given file.
  bool SawTypeNullability = false;
};

/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// A mapping from file IDs to the nullability information for each file ID.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A single-element cache based on the file ID.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  /// Look up (creating if needed) the nullability record for \p file,
  /// going through the single-element cache first.
  FileNullability &operator[](FileID file) {
    // Check the single-element cache.
    if (file == Cache.File)
      return Cache.Nullability;

    // It's not in the single-element cache; flush the cache if we have one.
    if (!Cache.File.isInvalid()) {
      Map[Cache.File] = Cache.Nullability;
    }

    // Pull this entry into the cache.
    // Note: Map[file] default-constructs a FileNullability for files not
    // seen before, which is the intended "nothing seen yet" state.
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};

/// Tracks expected type during expression parsing, for use in code completion.
/// The type is tied to a particular token, all functions that update or consume
/// the type take a start location of the token they are looking at as a
/// parameter. This avoids updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder(bool Enabled) : Enabled(Enabled) {}

  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Handles e.g. BaseType{ .D = Tok...
  void enterDesignatedInitializer(SourceLocation Tok, QualType BaseType,
                                  const Designation &D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref, clients should make sure all calls to get() with the same
  /// location happen while function_ref is alive.
  ///
  /// The callback should also emit signature help as a side-effect, but only
  /// if the completion point has been reached.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);

  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);

  /// Get the expected type associated with this location, if any.
  ///
  /// If the location is a function argument, determining the expected type
  /// involves considering all function overloads and the arguments so far.
  /// In this case, signature help for these function overloads will be reported
  /// as a side-effect (only if the completion point has been reached).
  QualType get(SourceLocation Tok) const {
    // Only report a type for the exact token the builder was primed with.
    if (!Enabled || Tok != ExpectedLoc)
      return QualType();
    if (!Type.isNull())
      return Type;
    // Fall back to the (possibly expensive) deferred computation.
    if (ComputeType)
      return ComputeType();
    return QualType();
  }

private:
  bool Enabled;
  /// Start position of a token for which we store expected type.
  SourceLocation ExpectedLoc;
  /// Expected type for a token starting at ExpectedLoc.
  QualType Type;
  /// A function to compute expected type at ExpectedLoc. It is only considered
  /// if Type is null.
  llvm::function_ref<QualType()> ComputeType;
};

/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
  Sema(const Sema &) = delete;
  void operator=(const Sema &) = delete;

  ///Source of additional semantic information.
  ExternalSemaSource *ExternalSource;

  ///Whether Sema has generated a multiplexer and has to delete it.
  bool isMultiplexExternalSource;

  static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);

  bool isVisibleSlow(const NamedDecl *D);

  /// Determine whether two declarations should be linked together, given that
  /// the old declaration might not be visible and the new declaration might
  /// not have external linkage.
  bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
                                    const NamedDecl *New) {
    if (isVisible(Old))
      return true;

    // See comment in below overload for why it's safe to compute the linkage
    // of the new declaration here.
if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: /// The maximum alignment, same as in llvm::Value. We duplicate them here /// because that allows us not to duplicate the constants in clang code, /// which we must to since we can't directly use the llvm constants. /// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp /// /// This is the greatest alignment value supported by load, store, and alloca /// instructions, and global values. static const unsigned MaxAlignmentExponent = 32; static const uint64_t MaximumAlignment = 1ull << MaxAlignmentExponent; typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions CurFPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; api_notes::APINotesManager APINotes; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. 
LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails. llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4, PCSK_Relro = 5 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangRelroSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; // #pragma pack and align. class AlignPackInfo { public: // `Native` represents default align mode, which may vary based on the // platform. 
enum Mode : unsigned char { Native, Natural, Packed, Mac68k }; // #pragma pack info constructor AlignPackInfo(AlignPackInfo::Mode M, unsigned Num, bool IsXL) : PackAttr(true), AlignMode(M), PackNumber(Num), XLStack(IsXL) { assert(Num == PackNumber && "The pack number has been truncated."); } // #pragma align info constructor AlignPackInfo(AlignPackInfo::Mode M, bool IsXL) : PackAttr(false), AlignMode(M), PackNumber(M == Packed ? 1 : UninitPackVal), XLStack(IsXL) {} explicit AlignPackInfo(bool IsXL) : AlignPackInfo(Native, IsXL) {} AlignPackInfo() : AlignPackInfo(Native, false) {} // When a AlignPackInfo itself cannot be used, this returns an 32-bit // integer encoding for it. This should only be passed to // AlignPackInfo::getFromRawEncoding, it should not be inspected directly. static uint32_t getRawEncoding(const AlignPackInfo &Info) { std::uint32_t Encoding{}; if (Info.IsXLStack()) Encoding |= IsXLMask; Encoding |= static_cast<uint32_t>(Info.getAlignMode()) << 1; if (Info.IsPackAttr()) Encoding |= PackAttrMask; Encoding |= static_cast<uint32_t>(Info.getPackNumber()) << 4; return Encoding; } static AlignPackInfo getFromRawEncoding(unsigned Encoding) { bool IsXL = static_cast<bool>(Encoding & IsXLMask); AlignPackInfo::Mode M = static_cast<AlignPackInfo::Mode>((Encoding & AlignModeMask) >> 1); int PackNumber = (Encoding & PackNumMask) >> 4; if (Encoding & PackAttrMask) return AlignPackInfo(M, PackNumber, IsXL); return AlignPackInfo(M, IsXL); } bool IsPackAttr() const { return PackAttr; } bool IsAlignAttr() const { return !PackAttr; } Mode getAlignMode() const { return AlignMode; } unsigned getPackNumber() const { return PackNumber; } bool IsPackSet() const { // #pragma align, #pragma pack(), and #pragma pack(0) do not set the pack // attriute on a decl. 
return PackNumber != UninitPackVal && PackNumber != 0; } bool IsXLStack() const { return XLStack; } bool operator==(const AlignPackInfo &Info) const { return std::tie(AlignMode, PackNumber, PackAttr, XLStack) == std::tie(Info.AlignMode, Info.PackNumber, Info.PackAttr, Info.XLStack); } bool operator!=(const AlignPackInfo &Info) const { return !(*this == Info); } private: /// \brief True if this is a pragma pack attribute, /// not a pragma align attribute. bool PackAttr; /// \brief The alignment mode that is in effect. Mode AlignMode; /// \brief The pack number of the stack. unsigned char PackNumber; /// \brief True if it is a XL #pragma align/pack stack. bool XLStack; /// \brief Uninitialized pack value. static constexpr unsigned char UninitPackVal = -1; // Masks to encode and decode an AlignPackInfo. static constexpr uint32_t IsXLMask{0x0000'0001}; static constexpr uint32_t AlignModeMask{0x0000'0006}; static constexpr uint32_t PackAttrMask{0x00000'0008}; static constexpr uint32_t PackNumMask{0x0000'01F0}; }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value) { if (Action == PSK_Reset) { CurrentValue = DefaultValue; CurrentPragmaLocation = PragmaLocation; return; } if (Action & PSK_Push) Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation, PragmaLocation); else if (Action & PSK_Pop) { if (!StackSlotLabel.empty()) { // If we've got a label, try to find it and jump there. 
auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) { return x.StackSlotLabel == StackSlotLabel; }); // If we found the label so pop from there. if (I != Stack.rend()) { CurrentValue = I->Value; CurrentPragmaLocation = I->PragmaLocation; Stack.erase(std::prev(I.base()), Stack.end()); } } else if (!Stack.empty()) { // We do not have a label, just pop the last entry. CurrentValue = Stack.back().Value; CurrentPragmaLocation = Stack.back().PragmaLocation; Stack.pop_back(); } } if (Action & PSK_Set) { CurrentValue = Value; CurrentPragmaLocation = PragmaLocation; } } // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. 
Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispMode> VtorDispStack; PragmaStack<AlignPackInfo> AlignPackStack; // The current #pragma align/pack values and locations at each #include. struct AlignPackIncludeState { AlignPackInfo CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<AlignPackIncludeState, 8> AlignPackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // This stack tracks the current state of Sema.CurFPFeatures. PragmaStack<FPOptionsOverride> FpPragmaStack; FPOptionsOverride CurFPFeatureOverrides() { FPOptionsOverride result; if (!FpPragmaStack.hasValue()) { result = FPOptionsOverride(); } else { result = FpPragmaStack.CurrentValue; } return result; } // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. 
struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. 
using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>, llvm::SmallPtrSet<Expr *, 4>>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; /// The index of the first FunctionScope that corresponds to the current /// context. unsigned FunctionScopesStart = 0; ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const { return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart, FunctionScopes.end()); } /// Stack containing information needed when in C++2a an 'auto' is encountered /// in a function declaration parameter type specifier in order to invent a /// corresponding template parameter in the enclosing abbreviated function /// template. This information is also present in LambdaScopeInfo, stored in /// the FunctionScopes stack. SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos; /// The index of the first InventedParameterInfo that refers to the current /// context. unsigned InventedParameterInfosStart = 0; ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const { return llvm::makeArrayRef(InventedParameterInfos.begin() + InventedParameterInfosStart, InventedParameterInfos.end()); } typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. 
std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; /// All the external declarations encoutered and used in the TU. SmallVector<VarDecl *, 4> ExternalDeclarations; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. 
Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } /// \brief Callback to the parser to parse a type expressed as a string. 
std::function<TypeResult(StringRef, StringRef, SourceLocation)> ParseTypeFromStringCallback; class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. 
  class ContextRAII {
  private:
    Sema &S;
    DeclContext *SavedContext;               // null once pop() has run
    ProcessingContextState SavedContextState;
    QualType SavedCXXThisTypeOverride;
    unsigned SavedFunctionScopesStart;
    unsigned SavedInventedParameterInfosStart;

  public:
    /// Push \p ContextToPush as the current DeclContext; the previous
    /// context (and related per-context state) is restored by pop() or the
    /// destructor. \p NewThisContext clears the 'this' type override so the
    /// new context does not inherit the enclosing one.
    ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
        : S(S), SavedContext(S.CurContext),
          SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
          SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
          SavedFunctionScopesStart(S.FunctionScopesStart),
          SavedInventedParameterInfosStart(S.InventedParameterInfosStart) {
      assert(ContextToPush && "pushing null context");
      S.CurContext = ContextToPush;
      if (NewThisContext)
        S.CXXThisTypeOverride = QualType();
      // Any saved FunctionScopes do not refer to this context.
      S.FunctionScopesStart = S.FunctionScopes.size();
      S.InventedParameterInfosStart = S.InventedParameterInfos.size();
    }

    /// Restore the saved context early; idempotent (subsequent calls and the
    /// destructor are no-ops once SavedContext is null).
    void pop() {
      if (!SavedContext) return;
      S.CurContext = SavedContext;
      S.DelayedDiagnostics.popUndelayed(SavedContextState);
      S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
      S.FunctionScopesStart = SavedFunctionScopesStart;
      S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
      SavedContext = nullptr;
    }

    ~ContextRAII() {
      pop();
    }
  };

  /// Whether the AST is currently being rebuilt to correct immediate
  /// invocations. Immediate invocation candidates and references to consteval
  /// functions aren't tracked when this is set.
  bool RebuildingImmediateInvocation = false;

  /// Used to change context to isConstantEvaluated without pushing a heavy
  /// ExpressionEvaluationContextRecord object.
  bool isConstantEvaluatedOverride;

  /// True if the innermost evaluation context is constant-evaluated, or the
  /// lightweight override above is set.
  bool isConstantEvaluated() {
    return ExprEvalContexts.back().isConstantEvaluated() ||
           isConstantEvaluatedOverride;
  }

  /// RAII object to handle the state changes required to synthesize
  /// a function body.
  class SynthesizedFunctionScope {
    Sema &S;
    Sema::ContextRAII SavedContext;
    bool PushedCodeSynthesisContext = false;

  public:
    /// Enter \p DC (a FunctionDecl or ObjCMethodDecl) as the current context,
    /// push a function scope and a potentially-evaluated expression context.
    /// Everything pushed here is undone in the destructor.
    SynthesizedFunctionScope(Sema &S, DeclContext *DC)
        : S(S), SavedContext(S, DC) {
      S.PushFunctionScope();
      S.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
      if (auto *FD = dyn_cast<FunctionDecl>(DC))
        FD->setWillHaveBody(true);
      else
        assert(isa<ObjCMethodDecl>(DC));
    }

    /// Record a "while defining the synthesized function" note for
    /// diagnostics, anchored at \p UseLoc. May be called at most once.
    void addContextNote(SourceLocation UseLoc) {
      assert(!PushedCodeSynthesisContext);

      Sema::CodeSynthesisContext Ctx;
      Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
      Ctx.PointOfInstantiation = UseLoc;
      Ctx.Entity = cast<Decl>(S.CurContext);
      S.pushCodeSynthesisContext(Ctx);

      PushedCodeSynthesisContext = true;
    }

    ~SynthesizedFunctionScope() {
      if (PushedCodeSynthesisContext)
        S.popCodeSynthesisContext();
      if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
        FD->setWillHaveBody(false);
      S.PopExpressionEvaluationContext();
      S.PopFunctionScopeInfo();
    }
  };

  /// WeakUndeclaredIdentifiers - Identifiers contained in \#pragma weak before
  /// declared. Rare. May alias another identifier, declared or undeclared.
  ///
  /// For aliases, the target identifier is used as a key for eventual
  /// processing when the target is declared. For the single-identifier form,
  /// the sole identifier is used as the key. Each entry is a `SetVector`
  /// (ordered by parse order) of aliases (identified by the alias name) in
  /// case of multiple aliases to the same undeclared identifier.
  llvm::MapVector<
      IdentifierInfo *,
      llvm::SetVector<
          WeakInfo, llvm::SmallVector<WeakInfo, 1u>,
          llvm::SmallDenseSet<WeakInfo, 2u, WeakInfo::DenseMapInfoByAliasOnly>>>
      WeakUndeclaredIdentifiers;

  /// ExtnameUndeclaredIdentifiers - Identifiers contained in
  /// \#pragma redefine_extname before declared. Used in Solaris system headers
  /// to define functions that occur in multiple standards to call the version
  /// in the currently selected standard.
  llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;

  /// Load weak undeclared identifiers from the external source.
  void LoadExternalWeakUndeclaredIdentifiers();

  /// WeakTopLevelDecl - Translation-unit scoped declarations generated by
  /// \#pragma weak during processing of other Decls.
  /// I couldn't figure out a clean way to generate these in-line, so
  /// we store them here and handle separately -- which is a hack.
  /// It would be best to refactor this.
  SmallVector<Decl*,2> WeakTopLevelDecl;

  /// Maps identifiers to the set of declarations visible for each name.
  IdentifierResolver IdResolver;

  /// Translation Unit Scope - useful to Objective-C actions that need
  /// to lookup file scope declarations in the "ordinary" C decl namespace.
  /// For example, user-defined classes, built-in "id" type, etc.
  Scope *TUScope;

  /// The C++ "std" namespace, where the standard library resides.
  LazyDeclPtr StdNamespace;

  /// The C++ "std::bad_alloc" class, which is defined by the C++
  /// standard library.
  LazyDeclPtr StdBadAlloc;

  /// The C++ "std::align_val_t" enum class, which is defined by the C++
  /// standard library.
  LazyDeclPtr StdAlignValT;

  /// The C++ "std::experimental" namespace, where the experimental parts
  /// of the standard library resides.
  NamespaceDecl *StdExperimentalNamespaceCache;

  /// The C++ "std::initializer_list" template, which is defined in
  /// \<initializer_list>.
  ClassTemplateDecl *StdInitializerList;

  /// The C++ "std::coroutine_traits" template, which is defined in
  /// \<coroutine_traits>
  ClassTemplateDecl *StdCoroutineTraitsCache;

  /// The namespace where coroutine components are defined. In standard,
  /// they are defined in std namespace. And in the previous implementation,
  /// they are defined in std::experimental namespace.
  NamespaceDecl *CoroTraitsNamespaceCache;

  /// The C++ "type_info" declaration, which is defined in \<typeinfo>.
  RecordDecl *CXXTypeInfoDecl;

  /// The MSVC "_GUID" struct, which is defined in MSVC header files.
  RecordDecl *MSVCGuidDecl;

  /// The C++ "std::source_location::__impl" struct, defined in
  /// \<source_location>.
  RecordDecl *StdSourceLocationImplDecl;

  /// Caches identifiers/selectors for NSFoundation APIs.
  std::unique_ptr<NSAPI> NSAPIObj;

  /// The declaration of the Objective-C NSNumber class.
  ObjCInterfaceDecl *NSNumberDecl;

  /// The declaration of the Objective-C NSValue class.
  ObjCInterfaceDecl *NSValueDecl;

  /// Pointer to NSNumber type (NSNumber *).
  QualType NSNumberPointer;

  /// Pointer to NSValue type (NSValue *).
  QualType NSValuePointer;

  /// The Objective-C NSNumber methods used to create NSNumber literals.
  ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];

  /// The declaration of the Objective-C NSString class.
  ObjCInterfaceDecl *NSStringDecl;

  /// Pointer to NSString type (NSString *).
  QualType NSStringPointer;

  /// The declaration of the stringWithUTF8String: method.
  ObjCMethodDecl *StringWithUTF8StringMethod;

  /// The declaration of the valueWithBytes:objCType: method.
  ObjCMethodDecl *ValueWithBytesObjCTypeMethod;

  /// The declaration of the Objective-C NSArray class.
  ObjCInterfaceDecl *NSArrayDecl;

  /// The declaration of the arrayWithObjects:count: method.
  ObjCMethodDecl *ArrayWithObjectsMethod;

  /// The declaration of the Objective-C NSDictionary class.
  ObjCInterfaceDecl *NSDictionaryDecl;

  /// The declaration of the dictionaryWithObjects:forKeys:count: method.
  ObjCMethodDecl *DictionaryWithObjectsMethod;

  /// id<NSCopying> type.
  QualType QIDNSCopying;

  /// will hold 'respondsToSelector:'
  Selector RespondsToSelectorSel;

  /// A flag to remember whether the implicit forms of operator new and delete
  /// have been declared.
  bool GlobalNewDeleteDeclared;

  /// Describes how the expressions currently being parsed are
  /// evaluated at run-time, if at all.
  enum class ExpressionEvaluationContext {
    /// The current expression and its subexpressions occur within an
    /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
    /// \c sizeof, where the type of the expression may be significant but
    /// no code will be generated to evaluate the value of the expression at
    /// run time.
    Unevaluated,

    /// The current expression occurs within a braced-init-list within
    /// an unevaluated operand. This is mostly like a regular unevaluated
    /// context, except that we still instantiate constexpr functions that are
    /// referenced here so that we can perform narrowing checks correctly.
    UnevaluatedList,

    /// The current expression occurs within a discarded statement.
    /// This behaves largely similarly to an unevaluated operand in preventing
    /// definitions from being required, but not in other ways.
    DiscardedStatement,

    /// The current expression occurs within an unevaluated
    /// operand that unconditionally permits abstract references to
    /// fields, such as a SIZE operator in MS-style inline assembly.
    UnevaluatedAbstract,

    /// The current context is "potentially evaluated" in C++11 terms,
    /// but the expression is evaluated at compile-time (like the values of
    /// cases in a switch statement).
    ConstantEvaluated,

    /// In addition of being constant evaluated, the current expression
    /// occurs in an immediate function context - either a consteval function
    /// or a consteval if function.
    ImmediateFunctionContext,

    /// The current expression is potentially evaluated at run time,
    /// which means that code may be generated to evaluate the value of the
    /// expression at run time.
    PotentiallyEvaluated,

    /// The current expression is potentially evaluated, but any
    /// declarations referenced inside that expression are only used if
    /// in fact the current expression is used.
    ///
    /// This value is used when parsing default function arguments, for which
    /// we would like to provide diagnostics (e.g., passing non-POD arguments
    /// through varargs) but do not want to mark declarations as "referenced"
    /// until the default argument is used.
    PotentiallyEvaluatedIfUsed
  };

  /// A candidate for an immediate invocation: the ConstantExpr plus one bit
  /// of extra state in the pointer's low bits.
  using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;

  /// Data structure used to record current or nested
  /// expression evaluation contexts.
  struct ExpressionEvaluationContextRecord {
    /// The expression evaluation context.
    ExpressionEvaluationContext Context;

    /// Whether the enclosing context needed a cleanup.
    CleanupInfo ParentCleanup;

    /// The number of active cleanup objects when we entered
    /// this expression evaluation context.
    unsigned NumCleanupObjects;

    /// The number of typos encountered during this expression evaluation
    /// context (i.e. the number of TypoExprs created).
    unsigned NumTypos;

    /// Expressions whose odr-use status was deferred in the enclosing
    /// context, saved while this context is active.
    MaybeODRUseExprSet SavedMaybeODRUseExprs;

    /// The lambdas that are present within this context, if it
    /// is indeed an unevaluated context.
    SmallVector<LambdaExpr *, 2> Lambdas;

    /// The declaration that provides context for lambda expressions
    /// and block literals if the normal declaration context does not
    /// suffice, e.g., in a default function argument.
    Decl *ManglingContextDecl;

    /// If we are processing a decltype type, a set of call expressions
    /// for which we have deferred checking the completeness of the return
    /// type.
    SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

    /// If we are processing a decltype type, a set of temporary binding
    /// expressions for which we have deferred checking the destructor.
    SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

    /// Expressions in this context that may be dereferences of null or
    /// otherwise suspicious pointers (-Wnoderef bookkeeping).
    llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

    /// Expressions appearing as the LHS of a volatile assignment in this
    /// context. We produce a warning for these when popping the context if
    /// they are not discarded-value expressions nor unevaluated operands.
    SmallVector<Expr*, 2> VolatileAssignmentLHSs;

    /// Set of candidates for starting an immediate invocation.
    llvm::SmallVector<ImmediateInvocationCandidate, 4>
        ImmediateInvocationCandidates;

    /// Set of DeclRefExprs referencing a consteval function when used in a
    /// context not already known to be immediately invoked.
    llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;

    /// \brief Describes whether we are in an expression constext which we have
    /// to handle differently.
    enum ExpressionKind {
      EK_Decltype, EK_TemplateArgument, EK_Other
    } ExprContext;

    // A context can be nested in both a discarded statement context and
    // an immediate function context, so they need to be tracked independently.
    bool InDiscardedStatement;
    bool InImmediateFunctionContext;

    ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                      unsigned NumCleanupObjects,
                                      CleanupInfo ParentCleanup,
                                      Decl *ManglingContextDecl,
                                      ExpressionKind ExprContext)
        : Context(Context), ParentCleanup(ParentCleanup),
          NumCleanupObjects(NumCleanupObjects), NumTypos(0),
          ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext),
          InDiscardedStatement(false), InImmediateFunctionContext(false) {}

    bool isUnevaluated() const {
      return Context == ExpressionEvaluationContext::Unevaluated ||
             Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
             Context == ExpressionEvaluationContext::UnevaluatedList;
    }

    bool isConstantEvaluated() const {
      return Context == ExpressionEvaluationContext::ConstantEvaluated ||
             Context == ExpressionEvaluationContext::ImmediateFunctionContext;
    }

    bool isImmediateFunctionContext() const {
      return Context == ExpressionEvaluationContext::ImmediateFunctionContext ||
             (Context == ExpressionEvaluationContext::DiscardedStatement &&
              InImmediateFunctionContext);
    }

    bool isDiscardedStatementContext() const {
      return Context == ExpressionEvaluationContext::DiscardedStatement ||
             (Context ==
                  ExpressionEvaluationContext::ImmediateFunctionContext &&
              InDiscardedStatement);
    }
  };

  /// A stack of expression evaluation contexts.
  SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

  /// Emit a warning for all pending noderef expressions that we recorded.
  void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);

  /// Compute the mangling number context for a lambda expression or
  /// block literal. Also return the extra mangling decl if any.
  ///
  /// \param DC - The DeclContext containing the lambda expression or
  /// block literal.
  std::tuple<MangleNumberingContext *, Decl *>
  getCurrentMangleNumberContext(const DeclContext *DC);

  /// SpecialMemberOverloadResult - The overloading result for a special member
  /// function.
  ///
  /// This is basically a wrapper around PointerIntPair. The lowest bits of the
  /// integer are used to determine whether overload resolution succeeded.
  class SpecialMemberOverloadResult {
  public:
    enum Kind {
      NoMemberOrDeleted,
      Ambiguous,
      Success
    };

  private:
    llvm::PointerIntPair<CXXMethodDecl *, 2> Pair;

  public:
    SpecialMemberOverloadResult() {}
    SpecialMemberOverloadResult(CXXMethodDecl *MD)
        : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}

    CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
    void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

    Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
    void setKind(Kind K) { Pair.setInt(K); }
  };

  /// A SpecialMemberOverloadResult that can live in a FoldingSet, so results
  /// can be cached per (record, member-kind) key.
  class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode,
                                           public SpecialMemberOverloadResult {
  public:
    SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
      : FastFoldingSetNode(ID)
    {}
  };

  /// A cache of special member function overload resolution results
  /// for C++ records.
  llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;

  /// A cache of the flags available in enumerations with the flag_bits
  /// attribute.
  mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;

  /// The kind of translation unit we are processing.
  ///
  /// When we're processing a complete translation unit, Sema will perform
  /// end-of-translation-unit semantic tasks (such as creating
  /// initializers for tentative definitions in C) once parsing has
  /// completed. Modules and precompiled headers perform different kinds of
  /// checks.
  const TranslationUnitKind TUKind;

  /// Arena allocator backing Sema-owned allocations for this TU.
  llvm::BumpPtrAllocator BumpAlloc;

  /// The number of SFINAE diagnostics that have been trapped.
  unsigned NumSFINAEErrors;

  typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
      UnparsedDefaultArgInstantiationsMap;

  /// A mapping from parameters with unparsed default arguments to the
  /// set of instantiations of each parameter.
  ///
  /// This mapping is a temporary data structure used when parsing
  /// nested class templates or nested classes of class templates,
  /// where we might end up instantiating an inner class before the
  /// default arguments of its methods have been parsed.
  UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

  // Contains the locations of the beginning of unparsed default
  // argument locations.
  llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;

  /// UndefinedInternals - all the used, undefined objects which require a
  /// definition in this translation unit.
  llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;

  /// Determine if VD, which must be a variable or function, is an external
  /// symbol that nonetheless can't be referenced from outside this translation
  /// unit because its type has no linkage and it's not extern "C".
  bool isExternalWithNoLinkageType(ValueDecl *VD);

  /// Obtain a sorted list of functions that are undefined but ODR-used.
  void getUndefinedButUsed(
      SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);

  /// Retrieves list of suspicious delete-expressions that will be checked at
  /// the end of translation unit.
  const llvm::MapVector<FieldDecl *, DeleteLocs> &
  getMismatchingDeleteExpressions() const;

  /// Thin map-like wrapper over a DenseMap from Selector to a pair of
  /// ObjCMethodList, exposing only the operations Sema needs.
  class GlobalMethodPool {
  public:
    using Lists = std::pair<ObjCMethodList, ObjCMethodList>;
    using iterator = llvm::DenseMap<Selector, Lists>::iterator;
    iterator begin() { return Methods.begin(); }
    iterator end() { return Methods.end(); }
    iterator find(Selector Sel) { return Methods.find(Sel); }
    std::pair<iterator, bool> insert(std::pair<Selector, Lists> &&Val) {
      return Methods.insert(Val);
    }
    int count(Selector Sel) const { return Methods.count(Sel); }
    bool empty() const { return Methods.empty(); }

  private:
    llvm::DenseMap<Selector, Lists> Methods;
  };

  /// Method Pool - allows efficient lookup when typechecking messages to "id".
  /// We need to maintain a list, since selectors can have differing signatures
  /// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
  /// of selectors are "overloaded").
  /// At the head of the list it is recorded whether there were 0, 1, or >= 2
  /// methods inside categories with a particular selector.
  GlobalMethodPool MethodPool;

  /// Method selectors used in a \@selector expression. Used for implementation
  /// of -Wselector.
  llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;

  /// List of SourceLocations where 'self' is implicitly retained inside a
  /// block.
  llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
      ImplicitlyRetainedSelfLocs;

  /// Kinds of C++ special members.
  enum CXXSpecialMember {
    CXXDefaultConstructor,
    CXXCopyConstructor,
    CXXMoveConstructor,
    CXXCopyAssignment,
    CXXMoveAssignment,
    CXXDestructor,
    CXXInvalid
  };

  typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
      SpecialMemberDecl;

  /// The C++ special members which we are currently in the process of
  /// declaring. If this process recursively triggers the declaration of the
  /// same special member, we should act as if it is not yet declared.
  llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

  /// Kinds of defaulted comparison operator functions.
  enum class DefaultedComparisonKind : unsigned char {
    /// This is not a defaultable comparison operator.
    None,
    /// This is an operator== that should be implemented as a series of
    /// subobject comparisons.
    Equal,
    /// This is an operator<=> that should be implemented as a series of
    /// subobject comparisons.
    ThreeWay,
    /// This is an operator!= that should be implemented as a rewrite in terms
    /// of a == comparison.
    NotEqual,
    /// This is an <, <=, >, or >= that should be implemented as a rewrite in
    /// terms of a <=> comparison.
    Relational,
  };

  /// The function definitions which were renamed as part of typo-correction
  /// to match their respective declarations. We want to keep track of them
  /// to ensure that we don't emit a "redefinition" error if we encounter a
  /// correctly named definition after the renamed definition.
  llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;

  /// Stack of types that correspond to the parameter entities that are
  /// currently being copy-initialized. Can be empty.
  llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;

  void ReadMethodPool(Selector Sel);
  void updateOutOfDateSelector(Selector Sel);

  /// Private Helper predicate to check for 'self'.
  bool isSelfExpr(Expr *RExpr);
  bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

  /// Cause the active diagnostic on the DiagosticsEngine to be
  /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
  /// should not be used elsewhere.
  void EmitCurrentDiagnostic(unsigned DiagID);

  /// Records and restores the CurFPFeatures state on entry/exit of compound
  /// statements.
  class FPFeaturesStateRAII {
  public:
    FPFeaturesStateRAII(Sema &S);
    ~FPFeaturesStateRAII();
    FPOptionsOverride getOverrides() { return OldOverrides; }

  private:
    Sema& S;
    // Saved floating-point state, restored by the destructor.
    FPOptions OldFPFeaturesState;
    FPOptionsOverride OldOverrides;
    LangOptions::FPEvalMethodKind OldEvalMethod;
    SourceLocation OldFPPragmaLocation;
  };

  void addImplicitTypedef(StringRef Name, QualType T);

  bool WarnedStackExhausted = false;

  /// Increment when we find a reference; decrement when we find an ignored
  /// assignment. Ultimately the value is 0 if every reference is an ignored
  /// assignment.
  llvm::DenseMap<const VarDecl *, int> RefsMinusAssignments;

private:
  Optional<std::unique_ptr<DarwinSDKInfo>> CachedDarwinSDKInfo;

  bool WarnedDarwinSDKInfoMissing = false;

public:
  Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
       TranslationUnitKind TUKind = TU_Complete,
       CodeCompleteConsumer *CompletionConsumer = nullptr);
  ~Sema();

  /// Perform initialization that occurs after the parser has been
  /// initialized but before it parses anything.
  void Initialize();

  /// This virtual key function only exists to limit the emission of debug info
  /// describing the Sema class. GCC and Clang only emit debug info for a class
  /// with a vtable when the vtable is emitted. Sema is final and not
  /// polymorphic, but the debug info size savings are so significant that it is
  /// worth adding a vtable just to take advantage of this optimization.
  virtual void anchor();

  const LangOptions &getLangOpts() const { return LangOpts; }
  OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
  FPOptions &getCurFPFeatures() { return CurFPFeatures; }

  DiagnosticsEngine &getDiagnostics() const { return Diags; }
  SourceManager &getSourceManager() const { return SourceMgr; }
  Preprocessor &getPreprocessor() const { return PP; }
  ASTContext &getASTContext() const { return Context; }
  ASTConsumer &getASTConsumer() const { return Consumer; }
  ASTMutationListener *getASTMutationListener() const;
  ExternalSemaSource* getExternalSource() const { return ExternalSource; }

  DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
                                                         StringRef Platform);
  DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking();

  ///Registers an external source. If an external source already exists,
  /// creates a multiplex external source and appends to it.
  ///
  ///\param[in] E - A non-null external sema source.
  ///
  void addExternalSource(ExternalSemaSource *E);

  void PrintStats() const;

  /// Warn that the stack is nearly exhausted.
  void warnStackExhausted(SourceLocation Loc);

  /// Run some code with "sufficient" stack space. (Currently, at least 256K is
  /// guaranteed). Produces a warning if we're low on stack space and allocates
  /// more in that case. Use this in code that may recurse deeply (for example,
  /// in template instantiation) to avoid stack overflow.
  void runWithSufficientStackSpace(SourceLocation Loc,
                                   llvm::function_ref<void()> Fn);

  /// Helper class that creates diagnostics with optional
  /// template instantiation stacks.
  ///
  /// This class provides a wrapper around the basic DiagnosticBuilder
  /// class that emits diagnostics. ImmediateDiagBuilder is
  /// responsible for emitting the diagnostic (as DiagnosticBuilder
  /// does) and, if the diagnostic comes from inside a template
  /// instantiation, printing the template instantiation stack as
  /// well.
  class ImmediateDiagBuilder : public DiagnosticBuilder {
    Sema &SemaRef;
    unsigned DiagID;   // diagnostic ID forwarded to EmitCurrentDiagnostic

  public:
    ImmediateDiagBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
        : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}
    ImmediateDiagBuilder(DiagnosticBuilder &&DB, Sema &SemaRef, unsigned DiagID)
        : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}

    // This is a cunning lie. DiagnosticBuilder actually performs move
    // construction in its copy constructor (but due to varied uses, it's not
    // possible to conveniently express this as actual move construction). So
    // the default copy ctor here is fine, because the base class disables the
    // source anyway, so the user-defined ~ImmediateDiagBuilder is a safe no-op
    // in that case anwyay.
    ImmediateDiagBuilder(const ImmediateDiagBuilder &) = default;

    ~ImmediateDiagBuilder() {
      // If we aren't active, there is nothing to do.
      if (!isActive()) return;

      // Otherwise, we need to emit the diagnostic. First clear the diagnostic
      // builder itself so it won't emit the diagnostic in its own destructor.
      //
      // This seems wasteful, in that as written the DiagnosticBuilder dtor
      // will do its own needless checks to see if the diagnostic needs to be
      // emitted. However, because we take care to ensure that the builder
      // objects never escape, a sufficiently smart compiler will be able to
      // eliminate that code.
      Clear();

      // Dispatch to Sema to emit the diagnostic.
      SemaRef.EmitCurrentDiagnostic(DiagID);
    }

    /// Teach operator<< to produce an object of the correct type.
    template <typename T>
    friend const ImmediateDiagBuilder &
    operator<<(const ImmediateDiagBuilder &Diag, const T &Value) {
      const DiagnosticBuilder &BaseDiag = Diag;
      BaseDiag << Value;
      return Diag;
    }

    // It is necessary to limit this to rvalue reference to avoid calling this
    // function with a bitfield lvalue argument since non-const reference to
    // bitfield is not allowed.
    template <typename T, typename = typename std::enable_if<
                              !std::is_lvalue_reference<T>::value>::type>
    const ImmediateDiagBuilder &operator<<(T &&V) const {
      const DiagnosticBuilder &BaseDiag = *this;
      BaseDiag << std::move(V);
      return *this;
    }
  };

  /// A generic diagnostic builder for errors which may or may not be deferred.
  ///
  /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
  /// which are not allowed to appear inside __device__ functions and are
  /// allowed to appear in __host__ __device__ functions only if the host+device
  /// function is never codegen'ed.
  ///
  /// To handle this, we use the notion of "deferred diagnostics", where we
  /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
  ///
  /// This class lets you emit either a regular diagnostic, a deferred
  /// diagnostic, or no diagnostic at all, according to an argument you pass to
  /// its constructor, thus simplifying the process of creating these "maybe
  /// deferred" diagnostics.
  class SemaDiagnosticBuilder {
  public:
    enum Kind {
      /// Emit no diagnostics.
      K_Nop,
      /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
      K_Immediate,
      /// Emit the diagnostic immediately, and, if it's a warning or error,
      /// also emit a call stack showing how this function can be reached by
      /// an a priori known-emitted function.
      K_ImmediateWithCallStack,
      /// Create a deferred diagnostic, which is emitted only if the function
      /// it's attached to is codegen'ed. Also emit a call stack as with
      /// K_ImmediateWithCallStack.
      K_Deferred
    };

    SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                          FunctionDecl *Fn, Sema &S);
    SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D);
    SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default;
    ~SemaDiagnosticBuilder();

    bool isImmediate() const { return ImmediateDiag.hasValue(); }

    /// Convertible to bool: True if we immediately emitted an error, false if
    /// we didn't emit an error or we created a deferred error.
    ///
    /// Example usage:
    ///
    ///   if (SemaDiagnosticBuilder(...) << foo << bar)
    ///     return ExprError();
    ///
    /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
    /// want to use these instead of creating a SemaDiagnosticBuilder yourself.
    operator bool() const { return isImmediate(); }

    template <typename T>
    friend const SemaDiagnosticBuilder &
    operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) {
      if (Diag.ImmediateDiag.hasValue())
        *Diag.ImmediateDiag << Value;
      else if (Diag.PartialDiagId.hasValue())
        Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
            << Value;
      return Diag;
    }

    // It is necessary to limit this to rvalue reference to avoid calling this
    // function with a bitfield lvalue argument since non-const reference to
    // bitfield is not allowed.
    template <typename T, typename = typename std::enable_if<
                              !std::is_lvalue_reference<T>::value>::type>
    const SemaDiagnosticBuilder &operator<<(T &&V) const {
      if (ImmediateDiag.hasValue())
        *ImmediateDiag << std::move(V);
      else if (PartialDiagId.hasValue())
        S.DeviceDeferredDiags[Fn][*PartialDiagId].second << std::move(V);
      return *this;
    }

    friend const SemaDiagnosticBuilder &
    operator<<(const SemaDiagnosticBuilder &Diag, const PartialDiagnostic &PD) {
      if (Diag.ImmediateDiag.hasValue())
        PD.Emit(*Diag.ImmediateDiag);
      else if (Diag.PartialDiagId.hasValue())
        Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second = PD;
      return Diag;
    }

    void AddFixItHint(const FixItHint &Hint) const {
      if (ImmediateDiag.hasValue())
        ImmediateDiag->AddFixItHint(Hint);
      else if (PartialDiagId.hasValue())
        S.DeviceDeferredDiags[Fn][*PartialDiagId].second.AddFixItHint(Hint);
    }

    friend ExprResult ExprError(const SemaDiagnosticBuilder &) {
      return ExprError();
    }
    friend StmtResult StmtError(const SemaDiagnosticBuilder &) {
      return StmtError();
    }
    operator ExprResult() const { return ExprError(); }
    operator StmtResult() const { return StmtError(); }
    operator TypeResult() const { return TypeError(); }
    operator DeclResult() const { return DeclResult(true); }
    operator MemInitResult() const { return MemInitResult(true); }

  private:
    Sema &S;
    SourceLocation Loc;
    unsigned DiagID;
    FunctionDecl *Fn;
    bool ShowCallStack;

    // Invariant: At most one of these Optionals has a value.
    // FIXME: Switch these to a Variant once that exists.
    llvm::Optional<ImmediateDiagBuilder> ImmediateDiag;
    llvm::Optional<unsigned> PartialDiagId;
  };

  /// Is the last error level diagnostic immediate. This is used to determined
  /// whether the next info diagnostic should be immediate.
  bool IsLastErrorImmediate = true;

  /// Emit a diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID,
                             bool DeferHint = false);

  /// Emit a partial diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD,
                             bool DeferHint = false);

  /// Build a partial diagnostic.
  PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

  /// Whether deferrable diagnostics should be deferred.
  bool DeferDiags = false;

  /// RAII class to control scope of DeferDiags.
  class DeferDiagsRAII {
    Sema &S;
    bool SavedDeferDiags = false;

  public:
    DeferDiagsRAII(Sema &S, bool DeferDiags)
        : S(S), SavedDeferDiags(S.DeferDiags) {
      S.DeferDiags = DeferDiags;
    }
    ~DeferDiagsRAII() { S.DeferDiags = SavedDeferDiags; }
  };

  /// Whether uncompilable error has occurred. This includes error happens
  /// in deferred diagnostics.
  bool hasUncompilableErrorOccurred() const;

  bool findMacroSpelling(SourceLocation &loc, StringRef name);

  /// Get a string to suggest for zero-initialization of a type.
  std::string getFixItZeroInitializerForType(QualType T,
                                             SourceLocation Loc) const;
  std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

  /// Calls \c Lexer::getLocForEndOfToken()
  SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

  /// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const; /// Invent a new identifier for parameters of abbreviated templates. IdentifierInfo * InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName, unsigned Index); void emitAndClearUnusedLocalTypedefWarnings(); private: /// Function or variable declarations to be checked for whether the deferred /// diagnostics should be emitted. llvm::SmallSetVector<Decl *, 4> DeclsToCheckForDeferredDiags; public: // Emit all deferred diagnostics. void emitDeferredDiags(); enum TUFragmentKind { /// The global module fragment, between 'module;' and a module-declaration. Global, /// A normal translation unit fragment. For a non-module unit, this is the /// entire translation unit. Otherwise, it runs from the module-declaration /// to the private-module-fragment (if any) or the end of the TU (if not). Normal, /// The private module fragment, between 'module :private;' and the end of /// the translation unit. Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K, unsigned OpenMPCaptureLevel = 0); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. 
class PoppedFunctionScopeDeleter {
  Sema *Self;

public:
  explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
  void operator()(sema::FunctionScopeInfo *Scope) const;
};

using PoppedFunctionScopePtr =
    std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;

PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
                     const Decl *D = nullptr,
                     QualType BlockType = QualType());

// Returns the innermost function scope, or null when no function scope is
// currently active.
sema::FunctionScopeInfo *getCurFunction() const {
  return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}

sema::FunctionScopeInfo *getEnclosingFunction() const;

// Record properties of the current function scope that affect later
// analysis (jumps into scopes, indirect goto, musttail, etc.).
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void setFunctionHasMustTail();

void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();

sema::CompoundScopeInfo &getCurCompoundScope() const;

bool hasAnyUnrecoverableErrorsInThisFunction() const;

/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();

/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;

/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);

/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();

/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();

/// Retrieve the current function, if any, that should be analyzed for
/// potential availability violations.
sema::FunctionScopeInfo *getCurFunctionAvailabilityContext();

/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }

/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
                                             unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);

void ActOnComment(SourceRange Comment);

//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//

QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
                            const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
                            const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T, SourceLocation Loc,
                          DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc,
                            DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
                        Expr *ArraySize, unsigned Quals,
                        SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
                            SourceLocation AttrLoc);
QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
                         SourceLocation AttrLoc);

QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
                               SourceLocation AttrLoc);

/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
                               SourceLocation AttrLoc);

bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);

bool CheckFunctionReturnType(QualType T, SourceLocation Loc);

/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes,
                           SourceLocation Loc, DeclarationName Entity,
                           const FunctionProtoType::ExtProtoInfo &EPI);

QualType BuildMemberPointerType(QualType T, QualType Class,
                                SourceLocation Loc, DeclarationName Entity);
QualType BuildBlockPointerType(QualType T, SourceLocation Loc,
                               DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T, SourceLocation Loc);
QualType BuildWritePipeType(QualType T, SourceLocation Loc);
QualType BuildBitIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);

TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);

/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
                                  TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
/// Determine whether the callee of a particular function call can throw.
/// E, D and Loc are all optional.
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
                                     SourceLocation Loc = SourceLocation());
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
                                              const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
                         const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
    const FunctionProtoType *Old, SourceLocation OldLoc,
    const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
    const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
    const FunctionProtoType *Old, SourceLocation OldLoc,
    const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
                              const PartialDiagnostic &NestedDiagID,
                              const PartialDiagnostic &NoteID,
                              const PartialDiagnostic &NoThrowDiagID,
                              const FunctionProtoType *Superset,
                              SourceLocation SuperLoc,
                              const FunctionProtoType *Subset,
                              SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
                             const PartialDiagnostic &NoteID,
                             const FunctionProtoType *Target,
                             SourceLocation TargetLoc,
                             const FunctionProtoType *Source,
                             SourceLocation SourceLoc);

TypeResult ActOnTypeName(Scope *S, Declarator &D);

/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message
/// declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);

/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
  TypeDiagnoser() {}

  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
  virtual ~TypeDiagnoser() {}
};

// Identity/conversion helpers that turn a diagnostic argument into a value
// the diagnostic builder can stream. SourceLocation and Expr are rendered as
// SourceRanges.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
  return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}

/// A TypeDiagnoser that binds a diagnostic ID together with its extra
/// arguments, streaming them (then the type) when diagnose() is called.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
  unsigned DiagID;
  std::tuple<const Ts &...> Args;

  template <std::size_t... Is>
  void emit(const SemaDiagnosticBuilder &DB,
            std::index_sequence<Is...>) const {
    // Apply all tuple elements to the builder in order.
    bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
    (void)Dummy;
  }

public:
  BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
      : TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
    assert(DiagID != 0 && "no diagnostic for type diagnoser");
  }

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
    emit(DB, std::index_sequence_for<Ts...>());
    DB << T;
  }
};

/// Do a check to make sure \p Name looks like a legal argument for the
/// swift_name attribute applied to decl \p D. Raise a diagnostic if the name
/// is invalid for the given declaration.
///
/// \p AL is used to provide caret diagnostics in case of a malformed name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc,
                       const ParsedAttr &AL, bool IsAsync);

/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
  SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
      : BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
    this->emit(DB, std::index_sequence_for<Ts...>());
    // Stream the 0/1 sizeless selector immediately before the type.
    DB << T->isSizelessType() << T;
  }
};

enum class CompleteTypeKind {
  /// Apply the normal rules for complete types. In particular,
  /// treat all sizeless types as incomplete.
  Normal,

  /// Relax the normal rules for complete types so that they include
  /// sizeless built-in types.
  AcceptSizeless,

  // FIXME: Eventually we should flip the default to Normal and opt in
  // to AcceptSizeless rather than opt out of it.
  Default = AcceptSizeless
};

private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking if the address
/// of them are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);

bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
                             CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);

/// State for one module currently being parsed.
struct ModuleScope {
  SourceLocation BeginLoc;
  clang::Module *Module = nullptr;
  bool ModuleInterface = false;
  bool IsPartition = false;
  bool ImplicitGlobalModuleFragment = false;
  VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// The global module fragment of the current translation unit.
clang::Module *GlobalModuleFragment = nullptr;

/// The modules we imported directly.
llvm::SmallPtrSet<clang::Module *, 8> DirectModuleImports;

/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;

/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
  return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}

/// Helper function to judge if we are in module purview.
/// Return false if we are not in a module.
bool isCurrentModulePurview() const {
  return getCurrentModule() ? getCurrentModule()->isModulePurview() : false;
}

/// Enter the scope of the global module.
Module *PushGlobalModuleFragment(SourceLocation BeginLoc, bool IsImplicit);
/// Leave the scope of the global module.
void PopGlobalModuleFragment();

VisibleModuleSet VisibleModules;

public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
  return Entity->getOwningModule();
}

bool isModuleDirectlyImported(const Module *M) {
  return DirectModuleImports.contains(M);
}

/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);

bool isModuleVisible(const Module *M, bool ModulePrivate = false);

// When loading a non-modular PCH files, this is used to restore module
// visibility.
void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) {
  VisibleModules.setVisible(Mod, ImportLoc);
}

/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
  // Fast path first; fall back to the slow walk only when needed.
  return D->isUnconditionallyVisible() || isVisibleSlow(D);
}

/// Determine whether any declaration of an entity is visible.
bool hasVisibleDeclaration(const NamedDecl *D,
                           llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
  return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}

bool hasVisibleDeclarationSlow(const NamedDecl *D,
                               llvm::SmallVectorImpl<Module *> *Modules);

bool hasVisibleMergedDefinition(NamedDecl *Def);

bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);

/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);

/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
                          bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
  NamedDecl *Hidden;
  return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}

/// Determine if the template parameter \p D has a visible default argument.
bool hasVisibleDefaultArgument(const NamedDecl *D,
                               llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
                                            const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
    SourceLocation Loc, const NamedDecl *D,
    ArrayRef<const NamedDecl *> Equiv);

bool isUsualDeallocationFunction(const CXXMethodDecl *FD);

// Query-only form: no diagnostic is emitted (null diagnoser).
bool isCompleteType(SourceLocation Loc, QualType T,
                    CompleteTypeKind Kind = CompleteTypeKind::Default) {
  return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         CompleteTypeKind Kind, unsigned DiagID);
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         TypeDiagnoser &Diagnoser) {
  return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
  return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}

// Variadic form: extra diagnostic arguments are bound into a
// BoundTypeDiagnoser.
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                         const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteType(Loc, T, Diagnoser);
}

// As above, but additionally requires a sized type (sizeless types are
// reported via the SizelessTypeDiagnoser's 0/1 selector).
template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
                              const Ts &... Args) {
  SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
}

/// Get the type of expression E, triggering instantiation to complete the
/// type if necessary -- that is, if the expression refers to a templated
/// static data member of incomplete array type.
///
/// May still return an incomplete type if instantiation was not possible or
/// if the type is incomplete for a different reason. Use
/// RequireCompleteExprType instead if a diagnostic is expected for an
/// incomplete expression type.
QualType getCompletedType(Expr *E);

void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
                             TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);

template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}

template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
                                  const Ts &... Args) {
  SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
}

bool RequireLiteralType(SourceLocation Loc, QualType T,
                        TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);

template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                        const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireLiteralType(Loc, T, Diagnoser);
}

QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                           const CXXScopeSpec &SS, QualType T,
                           TagDecl *OwnedTagDecl = nullptr);

// Returns the underlying type of a decltype with the given expression.
QualType getDecltypeForExpr(Expr *E);

QualType BuildTypeofExprType(Expr *E);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
                                 UnaryTransformType::UTTKind UKind,
                                 SourceLocation Loc);

//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//

/// Result of a check whether a function body can be skipped while parsing.
struct SkipBodyInfo {
  SkipBodyInfo()
      : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
        New(nullptr) {}
  bool ShouldSkip;
  bool CheckSameAsPrevious;
  NamedDecl *Previous;
  NamedDecl *New;
};

DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);

void DiagnoseUseOfUnimplementedSelectors();

bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;

ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
                       Scope *S, CXXScopeSpec *SS = nullptr,
                       bool isClassName = false, bool HasTrailingDot = false,
                       ParsedType ObjectType = nullptr,
                       bool IsCtorOrDtorName = false,
                       bool WantNontrivialTypeSourceInfo = false,
                       bool IsClassTemplateDeductionContext = true,
                       IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
                             SourceLocation IILoc,
                             Scope *S,
                             CXXScopeSpec *SS,
                             ParsedType &SuggestedType,
                             bool IsTemplateName = false);

/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
                                    SourceLocation NameLoc,
                                    bool IsTemplateTypeArg);

/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
  /// This name is not a type or template in this context, but might be
  /// something else.
  NC_Unknown,
  /// Classification failed; an error has been produced.
  NC_Error,
  /// The name has been typo-corrected to a keyword.
  NC_Keyword,
  /// The name was classified as a type.
  NC_Type,
  /// The name was classified as a specific non-type, non-template
  /// declaration. ActOnNameClassifiedAsNonType should be called to
  /// convert the declaration to an expression.
  NC_NonType,
  /// The name was classified as an ADL-only function name.
  /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
  /// result to an expression.
  NC_UndeclaredNonType,
  /// The name denotes a member of a dependent type that could not be
  /// resolved. ActOnNameClassifiedAsDependentNonType should be called to
  /// convert the result to an expression.
  NC_DependentNonType,
  /// The name was classified as an overload set, and an expression
  /// representing that overload set has been formed.
  /// ActOnNameClassifiedAsOverloadSet should be called to form a suitable
  /// expression referencing the overload set.
  NC_OverloadSet,
  /// The name was classified as a template whose specializations are types.
  NC_TypeTemplate,
  /// The name was classified as a variable template name.
  NC_VarTemplate,
  /// The name was classified as a function template name.
  NC_FunctionTemplate,
  /// The name was classified as an ADL-only function template name.
  NC_UndeclaredTemplate,
  /// The name was classified as a concept name.
NC_Concept,
};

/// Tagged-union result of ClassifyName(): carries the classification kind
/// plus the kind-specific payload (type, declaration, template name, or
/// overload-set expression).
class NameClassification {
  NameClassificationKind Kind;
  union {
    ExprResult Expr;
    NamedDecl *NonTypeDecl;
    TemplateName Template;
    ParsedType Type;
  };

  explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

public:
  NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

  NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}

  static NameClassification Error() {
    return NameClassification(NC_Error);
  }

  static NameClassification Unknown() {
    return NameClassification(NC_Unknown);
  }

  static NameClassification OverloadSet(ExprResult E) {
    NameClassification Result(NC_OverloadSet);
    Result.Expr = E;
    return Result;
  }

  static NameClassification NonType(NamedDecl *D) {
    NameClassification Result(NC_NonType);
    Result.NonTypeDecl = D;
    return Result;
  }

  static NameClassification UndeclaredNonType() {
    return NameClassification(NC_UndeclaredNonType);
  }

  static NameClassification DependentNonType() {
    return NameClassification(NC_DependentNonType);
  }

  static NameClassification TypeTemplate(TemplateName Name) {
    NameClassification Result(NC_TypeTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification VarTemplate(TemplateName Name) {
    NameClassification Result(NC_VarTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification FunctionTemplate(TemplateName Name) {
    NameClassification Result(NC_FunctionTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification Concept(TemplateName Name) {
    NameClassification Result(NC_Concept);
    Result.Template = Name;
    return Result;
  }

  static NameClassification UndeclaredTemplate(TemplateName Name) {
    NameClassification Result(NC_UndeclaredTemplate);
    Result.Template = Name;
    return Result;
  }

  NameClassificationKind getKind() const { return Kind; }

  // Payload accessors: each asserts that the stored kind matches.
  ExprResult getExpression() const {
    assert(Kind == NC_OverloadSet);
    return Expr;
  }

  ParsedType getType() const {
    assert(Kind == NC_Type);
    return Type;
  }

  NamedDecl *getNonTypeDecl() const {
    assert(Kind == NC_NonType);
    return NonTypeDecl;
  }

  TemplateName getTemplateName() const {
    assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
           Kind == NC_VarTemplate || Kind == NC_Concept ||
           Kind == NC_UndeclaredTemplate);
    return Template;
  }

  TemplateNameKind getTemplateNameKind() const {
    switch (Kind) {
    case NC_TypeTemplate:
      return TNK_Type_template;
    case NC_FunctionTemplate:
      return TNK_Function_template;
    case NC_VarTemplate:
      return TNK_Var_template;
    case NC_Concept:
      return TNK_Concept_template;
    case NC_UndeclaredTemplate:
      return TNK_Undeclared_template;
    default:
      llvm_unreachable("unsupported name classification.");
    }
  }
};

/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
                                IdentifierInfo *&Name, SourceLocation NameLoc,
                                const Token &NextToken,
                                CorrectionCandidateCallback *CCC = nullptr);

/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
                                                  SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
                                                 IdentifierInfo *Name,
                                                 SourceLocation NameLoc,
                                                 bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
                                        NamedDecl *Found,
                                        SourceLocation NameLoc,
                                        const Token &NextToken);
/// Act on the result of classifying a name as an overload set.
ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet);

/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
  ClassTemplate,
  FunctionTemplate,
  VarTemplate,
  AliasTemplate,
  TemplateTemplateParam,
  Concept,
  DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);

/// Determine whether it's plausible that E was intended to be a
/// template-name. Sets \p Dependent to true when the decision is based on a
/// dependent-scope reference.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
  // Only meaningful in C++, and only for valid expressions.
  if (!getLangOpts().CPlusPlus || E.isInvalid())
    return false;
  Dependent = false;
  // A plain reference without explicit template arguments may have been
  // intended as a template-name.
  if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
    return !DRE->hasExplicitTemplateArgs();
  if (auto *ME = dyn_cast<MemberExpr>(E.get()))
    return !ME->hasExplicitTemplateArgs();
  Dependent = true;
  if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
    return !DSDRE->hasExplicitTemplateArgs();
  if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
    return !DSME->hasExplicitTemplateArgs();
  // Any additional cases recognized here should also be handled by
  // diagnoseExprIntendedAsTemplateName.
  return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
                                        SourceLocation Less,
                                        SourceLocation Greater);

void warnOnReservedIdentifier(const NamedDecl *D);

Decl *ActOnDeclarator(Scope *S, Declarator &D);

NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
                            MultiTemplateParamsArg TemplateParameterLists);
bool tryToFixVariablyModifiedVarType(TypeSourceInfo *&TInfo,
                                     QualType &T, SourceLocation Loc,
                                     unsigned FailedFoldDiagID);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
                                  DeclarationName Name, SourceLocation Loc,
                                  bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
                          SourceLocation FallbackLoc,
                          SourceLocation ConstQualLoc = SourceLocation(),
                          SourceLocation VolatileQualLoc = SourceLocation(),
                          SourceLocation RestrictQualLoc = SourceLocation(),
                          SourceLocation AtomicQualLoc = SourceLocation(),
                          SourceLocation UnalignedQualLoc = SourceLocation());
void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range);
bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key);

static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
                                  const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
NamedDecl *getShadowedDeclaration(const BindingDecl *D,
                                  const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
                 const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);

/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);

void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);

private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;

public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
                                  TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                  TypeSourceInfo *TInfo,
                                  LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
                                LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists,
                                   bool &AddToScope,
                                   ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
                             MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
                                   Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);

NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists,
                                   bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);

/// How CheckConstexprFunctionDefinition should report problems it finds.
enum class CheckConstexprKind {
  /// Diagnose issues that are non-constant or that are extensions.
  Diagnose,
  /// Identify whether this function satisfies the formal rules for constexpr
  /// functions in the current language mode (with no extensions).
  CheckValid
};

bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
                                      CheckConstexprKind Kind);

void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
                              SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
                              SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
                              FunctionDecl *NewFD, LookupResult &Previous,
                              bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
                                    QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
                                                 bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
                                        SourceLocation Loc,
                                        QualType T);
QualType adjustParameterTypeForObjCAutoRefCount(QualType T,
                                                SourceLocation NameLoc,
                                                TypeSourceInfo *TSInfo);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
                            SourceLocation NameLoc, IdentifierInfo *Name,
                            QualType T, TypeSourceInfo *TSInfo,
                            StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
                               SourceLocation EqualLoc,
                               Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
                                       SourceLocation EqualLoc,
                                       SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
ExprResult ConvertParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
                                       SourceLocation EqualLoc);
void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
                             SourceLocation EqualLoc);

// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
  // Function parameter.
  NTCUC_FunctionParam,
  // Function return.
  NTCUC_FunctionReturn,
  // Default-initialized object.
  NTCUC_DefaultInitializedObject,
  // Variable with automatic storage duration.
  NTCUC_AutoVar,
  // Initializer expression that might copy from another object.
  NTCUC_CopyInit,
  // Assignment.
  NTCUC_Assignment,
  // Compound literal.
  NTCUC_CompoundLiteral,
  // Block capture.
  NTCUC_BlockCapture,
  // lvalue-to-rvalue conversion of volatile type.
  NTCUC_LValueToRValueVolatile,
};

/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);

// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
  NTCUK_Init = 0x1,
  NTCUK_Destruct = 0x2,
  NTCUK_Copy = 0x4,
};

/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
                           NonTrivialCUnionContext UseContext,
                           unsigned NonTrivialKind);

void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);

void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
                                      IdentifierInfo *Ident,
                                      ParsedAttributes &Attrs);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
                                       ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);

/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);

void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
                                     SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
    FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
    SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
                              MultiTemplateParamsArg TemplateParamLists,
                              SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
                              SkipBodyInfo *SkipBody = nullptr);
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
ExprResult ActOnRequiresClause(ExprResult ConstraintExpr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
  return D && isa<ObjCMethodDecl>(D);
}

/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
/// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. 
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' PartitionInterface, ///< 'export module X:Y;' PartitionImplementation, ///< 'module X:Y;' }; /// An enumeration to represent the transition of states in parsing module /// fragments and imports. If we are not parsing a C++20 TU, or we find /// an error in state transition, the state is set to NotACXX20Module. enum class ModuleImportState { FirstDecl, ///< Parsing the first decl in a TU. GlobalFragment, ///< after 'module;' but before 'module X;' ImportAllowed, ///< after 'module X;' but before any non-import decl. ImportFinished, ///< after any non-import decl. PrivateFragment, ///< after 'module :private;'. NotACXX20Module ///< Not a C++20 TU, or an invalid state was found. }; private: /// The parser has begun a translation unit to be compiled as a C++20 /// Header Unit, helper for ActOnStartOfTranslationUnit() only. void HandleStartOfHeaderUnit(); public: /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, ModuleIdPath Partition, ModuleImportState &ImportState); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. 
  DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
                                                SourceLocation PrivateLoc);

  /// The parser has processed a module import declaration.
  ///
  /// \param StartLoc The location of the first token in the declaration. This
  ///        could be the location of an '@', 'export', or 'import'.
  /// \param ExportLoc The location of the 'export' keyword, if any.
  /// \param ImportLoc The location of the 'import' keyword.
  /// \param Path The module toplevel name as an access path.
  /// \param IsPartition If the name is for a partition.
  DeclResult ActOnModuleImport(SourceLocation StartLoc,
                               SourceLocation ExportLoc,
                               SourceLocation ImportLoc, ModuleIdPath Path,
                               bool IsPartition = false);
  DeclResult ActOnModuleImport(SourceLocation StartLoc,
                               SourceLocation ExportLoc,
                               SourceLocation ImportLoc, Module *M,
                               ModuleIdPath Path = {});

  /// The parser has processed a module import translated from a
  /// #include or similar preprocessing directive.
  void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
  void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

  /// The parser has entered a submodule.
  void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
  /// The parser has left a submodule.
  void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

  /// Create an implicit import of the given module at the given
  /// source location, for error recovery, if possible.
  ///
  /// This routine is typically used when an entity found by name lookup
  /// is actually hidden within a module that we know about but the user
  /// has forgotten to import.
  void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                  Module *Mod);

  /// Kinds of missing import. Note, the values of these enumerators correspond
  /// to %select values in diagnostics.
enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem. void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. 
enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, 
InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); /// For a defaulted function, the kind of defaulted function that it is. class DefaultedFunctionKind { CXXSpecialMember SpecialMember : 8; DefaultedComparisonKind Comparison : 8; public: DefaultedFunctionKind() : SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) { } DefaultedFunctionKind(CXXSpecialMember CSM) : SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {} DefaultedFunctionKind(DefaultedComparisonKind Comp) : SpecialMember(CXXInvalid), Comparison(Comp) {} bool isSpecialMember() const { return SpecialMember != CXXInvalid; } bool isComparison() const { return Comparison != DefaultedComparisonKind::None; } explicit operator bool() const { return isSpecialMember() || isComparison(); } CXXSpecialMember asSpecialMember() const { return SpecialMember; } DefaultedComparisonKind asComparison() const { return Comparison; } /// Get the index of this function kind for use in diagnostics. 
    unsigned getDiagnosticIndex() const {
      // The two enums share one %select index space: special members occupy
      // the low values and comparison kinds follow (both asserts below keep
      // that packing valid).
      static_assert(CXXInvalid > CXXDestructor,
                    "invalid should have highest index");
      static_assert((unsigned)DefaultedComparisonKind::None == 0,
                    "none should be equal to zero");
      return SpecialMember + (unsigned)Comparison;
    }
  };

  /// Classify \p FD as a defaultable special member and/or comparison;
  /// the two accessors below project out each half of the classification.
  DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);

  /// Convenience accessor: the special-member half of the classification.
  CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
    return getDefaultedFunctionKind(MD).asSpecialMember();
  }

  /// Convenience accessor: the defaulted-comparison half of the
  /// classification.
  DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
    return getDefaultedFunctionKind(FD).asComparison();
  }

  void ActOnLastBitfield(SourceLocation DeclStart,
                         SmallVectorImpl<Decl *> &AllIvarDecls);

  Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
                  Declarator &D, Expr *BitfieldWidth,
                  tok::ObjCKeywordKind visibility);

  // This is used for both record definitions and ObjC interface declarations.
  void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
                   ArrayRef<Decl *> Fields, SourceLocation LBrac,
                   SourceLocation RBrac, const ParsedAttributesView &AttrList);

  /// ActOnTagStartDefinition - Invoked when we have entered the
  /// scope of a tag's definition (e.g., for an enumeration, class,
  /// struct, or union).
  void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);

  /// Perform ODR-like check for C/ObjC when merging tag types from modules.
  /// Differently from C++, actually parse the body and reject / error out
  /// in case of a structural mismatch.
  bool ActOnDuplicateDefinition(Decl *Prev, SkipBodyInfo &SkipBody);

  // Opaque token handed back by ActOnTagStartSkippedDefinition and consumed
  // by the matching "finish skipped definition" callback.
  typedef void *SkippedDefinitionContext;

  /// Invoked when we enter a tag definition that we're skipping.
  SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);

  Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);

  /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
  /// C++ record definition's base-specifiers clause and are starting its
  /// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, bool IsAbstract, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); /// Set the current declaration context until it gets popped. 
void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Enter a template parameter scope, after it's been associated with a particular /// DeclContext. Causes lookup within the scope to chain through enclosing contexts /// in the correct order. void EnterTemplatedContext(Scope *S, DeclContext *DC); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); /// If \p AllowLambda is true, treat lambda as function. DeclContext *getFunctionLevelDeclContext(bool AllowLambda = false); /// Returns a pointer to the innermost enclosing function, or nullptr if the /// current context is not inside a function. If \p AllowLambda is true, /// this can return the call operator of an enclosing lambda, otherwise /// lambdas are skipped when looking for an enclosing function. FunctionDecl *getCurFunctionDecl(bool AllowLambda = false); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. 
  ///
  /// \param AllowInlineNamespace If \c true, allow the declaration to be in the
  ///        enclosing namespace set of the context, rather than contained
  ///        directly within it.
  bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
                     bool AllowInlineNamespace = false);

  /// Finds the scope corresponding to the given decl context, if it
  /// happens to be an enclosing scope. Otherwise return NULL.
  static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

  /// Subroutines of ActOnDeclarator().
  TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                                TypeSourceInfo *TInfo);
  bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

  /// Describes the kind of merge to perform for availability
  /// attributes (including "deprecated", "unavailable", and "availability").
  enum AvailabilityMergeKind {
    /// Don't merge availability attributes at all.
    AMK_None,

    /// Merge availability attributes for a redeclaration, which requires
    /// an exact match.
    AMK_Redeclaration,

    /// Merge availability attributes for an override, which requires
    /// an exact match or a weakening of constraints.
    AMK_Override,

    /// Merge availability attributes for an implementation of
    /// a protocol requirement.
    AMK_ProtocolImplementation,

    /// Merge availability attributes for an implementation of
    /// an optional protocol requirement.
    AMK_OptionalProtocolImplementation
  };

  /// Describes the kind of priority given to an availability attribute.
  ///
  /// The sum of priorities determines the final priority of the attribute.
  /// The final priority determines how the attribute will be merged.
  /// An attribute with a lower priority will always remove higher priority
  /// attributes for the specified platform when it is being applied. An
  /// attribute with a higher priority will not be applied if the declaration
  /// already has an availability attribute with a lower priority for the
  /// specified platform. The final priority values are not expected to match
  /// the values in this enumeration, but instead should be treated as a plain
  /// integer value. This enumeration just names the priority weights that are
  /// used to calculate that final value.
  enum AvailabilityPriority : int {
    /// The availability attribute was specified explicitly next to the
    /// declaration.
    AP_Explicit = 0,

    /// The availability attribute was applied using '#pragma clang attribute'.
    AP_PragmaClangAttribute = 1,

    /// The availability attribute for a specific platform was inferred from
    /// an availability attribute for another platform.
    AP_InferredFromOtherPlatform = 2
  };

  /// Attribute merging methods. Each returns the merged attribute if a new
  /// attribute was added (note: the result is an attribute pointer, not bool;
  /// presumably null when nothing was added — verify against the
  /// definitions, which are not visible here).
  AvailabilityAttr *
  mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
                        IdentifierInfo *Platform, bool Implicit,
                        VersionTuple Introduced, VersionTuple Deprecated,
                        VersionTuple Obsoleted, bool IsUnavailable,
                        StringRef Message, bool IsStrict, StringRef Replacement,
                        AvailabilityMergeKind AMK, int Priority);
  TypeVisibilityAttr *
  mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
                          TypeVisibilityAttr::VisibilityType Vis);
  VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
                                      VisibilityAttr::VisibilityType Vis);
  UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
                          StringRef UuidAsWritten, MSGuidDecl *GuidDecl);
  DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
  DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
  MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
                                            const AttributeCommonInfo &CI,
                                            bool BestCase,
                                            MSInheritanceModel Model);
  ErrorAttr *mergeErrorAttr(Decl *D, const AttributeCommonInfo &CI,
                            StringRef NewUserDiagnostic);
  FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
                              IdentifierInfo *Format, int FormatIdx,
                              int FirstArg);
  SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
                                StringRef Name);
  CodeSegAttr
*mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, const AttributeCommonInfo &CI, const IdentifierInfo *Ident); MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI); SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA, StringRef Name); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, const AttributeCommonInfo &CI); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); WebAssemblyImportNameAttr *mergeImportNameAttr( Decl *D, const WebAssemblyImportNameAttr &AL); WebAssemblyImportModuleAttr *mergeImportModuleAttr( Decl *D, const WebAssemblyImportModuleAttr &AL); EnforceTCBAttr *mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL); EnforceTCBLeafAttr *mergeEnforceTCBLeafAttr(Decl *D, const EnforceTCBLeafAttr &AL); BTFDeclTagAttr *mergeBTFDeclTagAttr(Decl *D, const BTFDeclTagAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the 
operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true, bool ConsiderRequiresClauses = true); enum class AllowedExplicit { /// Allow no explicit functions to be used. None, /// Allow explicit conversion functions but not explicit constructors. Conversions, /// Allow both explicit conversion functions and explicit constructors. 
All }; ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, AllowedExplicit AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(QualType Param, QualType Arg); bool CanPerformAggregateInitializationForOverloadResolution( const 
InitializedEntity &Entity, InitListExpr *From); bool IsStringInit(Expr *Init, const ArrayType *AT); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_ArrayBound, ///< Array bound in array declarator or new-expression. CCEK_ExplicitBool, ///< Condition in an explicit(bool) specifier. CCEK_Noexcept ///< Condition in a noexcept(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE, NamedDecl *Dest = nullptr); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. 
class ContextualImplicitConverter {
public:
  // Diagnostic-suppression knobs; presumably Suppress disables the primary
  // "no match" diagnostic and SuppressConversion the conversion diagnostics
  // -- confirm at the use sites in SemaExprCXX.
  bool Suppress;
  bool SuppressConversion;

  ContextualImplicitConverter(bool Suppress = false,
                              bool SuppressConversion = false)
      : Suppress(Suppress), SuppressConversion(SuppressConversion) {}

  /// Determine whether the specified type is a valid destination type
  /// for this conversion.
  virtual bool match(QualType T) = 0;

  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                                QualType T) = 0;

  /// Emits a diagnostic when the expression has incomplete class type.
  virtual SemaDiagnosticBuilder
  diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a diagnostic when the only matching conversion function
  /// is explicit.
  virtual SemaDiagnosticBuilder diagnoseExplicitConv(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  /// Emits a note for the explicit conversion function.
  virtual SemaDiagnosticBuilder
  noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// Emits a diagnostic when there are multiple possible conversion
  /// functions.
  virtual SemaDiagnosticBuilder
  diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a note for one of the candidate conversions.
  virtual SemaDiagnosticBuilder
  noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// Emits a diagnostic when we picked a conversion function
  /// (for cases when we are not allowed to pick a conversion function).
  virtual SemaDiagnosticBuilder diagnoseConversion(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  virtual ~ContextualImplicitConverter() {}
};

/// Converter used for contexts that require an integral or (possibly
/// scoped) enumeration type; see match() below.
class ICEConvertDiagnoser : public ContextualImplicitConverter {
  // Whether scoped (class) enumerations are acceptable destinations.
  bool AllowScopedEnumerations;

public:
  ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress,
                      bool SuppressConversion)
      : ContextualImplicitConverter(Suppress, SuppressConversion),
        AllowScopedEnumerations(AllowScopedEnumerations) {}

  /// Match an integral or (possibly scoped) enumeration type.
  bool match(QualType T) override;

  // Forwards the generic "no match" diagnostic to the integral-specific one.
  SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                        QualType T) override {
    return diagnoseNotInt(S, Loc, T);
  }

  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder
  diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};

/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
    SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);

enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error };
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
  LK_Array,
  LK_Dictionary,
  LK_Numeric,
  LK_Boxed,
  LK_String,
  LK_Block,
  LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);

ExprResult PerformObjectMemberConversion(Expr *From,
                                         NestedNameSpecifier *Qualifier,
                                         NamedDecl *FoundDecl,
                                         NamedDecl *Member);

// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false, OverloadCandidateParamOrder PO = {}); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, OverloadCandidateParamOrder PO = {}); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, 
DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, OverloadCandidateParamOrder PO = {}); bool CheckNonDependentConversions( FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}, OverloadCandidateParamOrder PO = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddNonMemberOperatorCandidates( const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, OverloadCandidateParamOrder PO = {}); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = 
false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate( NamedDecl *Found, FunctionDecl *Fn, OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(), QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. 
/// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfSingleOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); void AddOverloadedCallCandidates( LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet 
&CandidateSet); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass, NestedNameSpecifierLoc NNSLoc, DeclarationNameInfo DNI, const UnresolvedSetImpl &Fns, bool PerformADL = true); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet, OverloadedOperatorKind Op, const UnresolvedSetImpl &Fns, ArrayRef<Expr *> Args, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true, bool AllowRewrittenCandidates = true, FunctionDecl *DefaultedFn = nullptr); ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, FunctionDecl *DefaultedFn); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base, MultiExprArg Args); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool 
IsExecConfig = false, bool AllowRecovery = false); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. 
In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up a name following ~ in a destructor name. This is an ordinary /// lookup, but prefers tags to typedefs. LookupDestructorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. 
This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. 
LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplatePack, }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, SourceLocation TypoLoc); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. 
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;

/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;

/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
                           Sema::LookupNameKind LookupKind, Scope *S,
                           CXXScopeSpec *SS,
                           CorrectionCandidateCallback &CCC,
                           DeclContext *MemberContext, bool EnteringContext,
                           const ObjCObjectPointerType *OPT,
                           bool ErrorRecovery);

public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;

/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);

/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
                            SourceLocation Loc, LookupNameKind NameKind,
                            RedeclarationKind Redecl = NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID);
bool LookupName(LookupResult &R, Scope *S,
                bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                         bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                         CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
                      bool AllowBuiltinCreation = false,
                      bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
                                 RedeclarationKind Redecl =
                                     NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);

void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
                                  UnresolvedSetImpl &Functions);

LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
                               SourceLocation GnuLabelLoc = SourceLocation());

// Lookup helpers for the C++ special member functions of a class.
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
                                             unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                       bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
                                            unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                      bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);

bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id,
                            bool IsUDSuffix);
LiteralOperatorLookupResult
LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys,
                      bool AllowRaw, bool AllowTemplate,
                      bool AllowStringTemplate, bool DiagnoseMissing,
                      StringLiteral *StringLit = nullptr);
bool isKnownName(StringRef name);
/// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs.
enum class FunctionEmissionStatus {
  Emitted,
  CUDADiscarded,     // Discarded due to CUDA/HIP hostness
  OMPDiscarded,      // Discarded due to OpenMP hostness
  TemplateDiscarded, // Discarded due to uninstantiated templates
  Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
                                         bool Final = false);

// Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);

void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
                             ArrayRef<Expr *> Args, ADLResult &Functions);

void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
                        VisibleDeclConsumer &Consumer,
                        bool IncludeGlobalScope = true,
                        bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
                        VisibleDeclConsumer &Consumer,
                        bool IncludeGlobalScope = true,
                        bool IncludeDependentBases = false,
                        bool LoadExternal = true);

enum CorrectTypoKind {
  CTK_NonError,     // CorrectTypo used in a non error recovery situation.
  CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};

TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
                           Sema::LookupNameKind LookupKind,
                           Scope *S, CXXScopeSpec *SS,
                           CorrectionCandidateCallback &CCC,
                           CorrectTypoKind Mode,
                           DeclContext *MemberContext = nullptr,
                           bool EnteringContext = false,
                           const ObjCObjectPointerType *OPT = nullptr,
                           bool RecordFailure = true);

TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
                             Sema::LookupNameKind LookupKind, Scope *S,
                             CXXScopeSpec *SS,
                             CorrectionCandidateCallback &CCC,
                             TypoDiagnosticGenerator TDG,
                             TypoRecoveryCallback TRC, CorrectTypoKind Mode,
                             DeclContext *MemberContext = nullptr,
                             bool EnteringContext = false,
                             const ObjCObjectPointerType *OPT = nullptr);

/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param RecoverUncorrectedTypos If true, when typo correction fails, it
/// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult CorrectDelayedTyposInExpr(
    Expr *E, VarDecl *InitDecl = nullptr,
    bool RecoverUncorrectedTypos = false,
    llvm::function_ref<ExprResult(Expr *)> Filter =
        [](Expr *E) -> ExprResult { return E; });

/// Convenience overload: unwraps the ExprResult, passing invalid results
/// through unchanged and delegating valid ones to the Expr* overload above.
ExprResult CorrectDelayedTyposInExpr(
    ExprResult ER, VarDecl *InitDecl = nullptr,
    bool RecoverUncorrectedTypos = false,
    llvm::function_ref<ExprResult(Expr *)> Filter =
        [](Expr *E) -> ExprResult { return E; }) {
  return ER.isInvalid()
             ? ER
             : CorrectDelayedTyposInExpr(ER.get(), InitDecl,
                                         RecoverUncorrectedTypos, Filter);
}

void diagnoseTypo(const TypoCorrection &Correction,
                  const PartialDiagnostic &TypoDiag,
                  bool ErrorRecovery = true);

void diagnoseTypo(const TypoCorrection &Correction,
                  const PartialDiagnostic &TypoDiag,
                  const PartialDiagnostic &PrevNote,
                  bool ErrorRecovery = true);

void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);

void FindAssociatedClassesAndNamespaces(
    SourceLocation InstantiationLoc, ArrayRef<Expr *> Args,
    AssociatedNamespaceSet &AssociatedNamespaces,
    AssociatedClassSet &AssociatedClasses);

void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
                          bool ConsiderLinkage, bool AllowInlineNamespace);

bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
bool CheckRedeclarationExported(NamedDecl *New, NamedDecl *Old);
bool CheckRedeclarationInModule(NamedDecl *New, NamedDecl *Old);

void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}

/// Attempts to produce a RecoveryExpr after some AST node cannot be created.
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
                              ArrayRef<Expr *> SubExprs,
                              QualType T = QualType());

ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
                                        SourceLocation IdLoc,
                                        bool TypoCorrection = false);
FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID,
                            SourceLocation Loc);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S,
                               bool ForRedeclaration, SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
                                    Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
    FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);

// More parsing and symbol table subroutines.

void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Handles semantic checking for features that are common to all attributes, /// such as checking whether a parameter was properly specified, or the /// correct number of arguments were passed, etc. Returns true if the /// attribute has been diagnosed. bool checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A, bool SkipArgCountCheck = false); bool checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A, bool SkipArgCountCheck = false); /// Map any API notes provided for this declaration to attributes on the /// declaration. /// /// Triggered by declaration-attribute processing. void ProcessAPINotes(Decl *D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. 
bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const AttributeCommonInfo &CI, const Expr *E, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); llvm::Error isValidSectionSpecifier(StringRef Str); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkTargetClonesAttrString(SourceLocation LiteralLoc, StringRef Str, const StringLiteral *Literal, bool &HasDefault, bool &HasCommas, SmallVectorImpl<StringRef> &Strings); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceModel SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. 
const AttributedType *getCallingConvAttributedType(QualType T) const;

/// Check whether a nullability type specifier can be added to the given
/// type through some means not written in source (e.g. API notes).
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param diagLoc The location to use for diagnostics.
///
/// \param allowArrayTypes Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \param overrideExisting Whether to override an existing, locally-specified
/// nullability specifier rather than complaining about the conflict.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkImplicitNullabilityTypeSpecifier(QualType &type,
                                           NullabilityKind nullability,
                                           SourceLocation diagLoc,
                                           bool allowArrayTypes,
                                           bool overrideExisting);

/// Process the attributes before creating an attributed statement. Returns
/// the semantic attributes that have been processed.
void ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributes &InAttrs,
                           SmallVectorImpl<const Attr *> &OutAttrs);

void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
                                 ObjCMethodDecl *MethodDecl,
                                 bool IsProtocolMethodDecl);

void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
                                      ObjCMethodDecl *Overridden,
                                      bool IsProtocolMethodDecl);

/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl,
                           bool IsProtocolMethodDecl);

typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;

/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
                              ObjCIvarDecl **Fields, unsigned nIvars,
                              SourceLocation Loc);

/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
                               ObjCContainerDecl* IDecl,
                               bool IncompleteImpl = false);

/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
                                     ObjCContainerDecl *CDecl,
                                     bool SynthesizeProperties);

/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                 ObjCInterfaceDecl *IDecl,
                                 SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);

/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
                                    ObjCMethodDecl *Method,
                                    ObjCIvarDecl *IV);

/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar
/// which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
                                         const ObjCImplementationDecl *ImplD);

/// GetIvarBackingPropertyAccessor - If method is a property setter/getter and
/// its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(
    const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const;

/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
                                                 SourceLocation AtLoc,
                                                 SourceLocation LParenLoc,
                                                 FieldDeclarator &FD,
                                                 Selector GetterSel,
                                                 SourceLocation GetterNameLoc,
                                                 Selector SetterSel,
                                                 SourceLocation SetterNameLoc,
                                                 const bool isReadWrite,
                                                 unsigned &Attributes,
                                                 const unsigned AttributesAsWritten,
                                                 QualType T,
                                                 TypeSourceInfo *TSI,
                                                 tok::ObjCKeywordKind MethodImplKind);

/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
                                     ObjCContainerDecl *CDecl,
                                     SourceLocation AtLoc,
                                     SourceLocation LParenLoc,
                                     FieldDeclarator &FD,
                                     Selector GetterSel,
                                     SourceLocation GetterNameLoc,
                                     Selector SetterSel,
                                     SourceLocation SetterNameLoc,
                                     const bool isReadWrite,
                                     const unsigned Attributes,
                                     const unsigned AttributesAsWritten,
                                     QualType T,
                                     TypeSourceInfo *TSI,
                                     tok::ObjCKeywordKind MethodImplKind,
                                     DeclContext *lexicalDC = nullptr);

/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
                                     ObjCInterfaceDecl* IDecl);

void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

void DiagnoseMissingDesignatedInitOverrides(
    const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD);

void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

enum MethodMatchStrategy { MMS_loose, MMS_strict };

/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                const ObjCMethodDecl *PrevMethod,
                                MethodMatchStrategy strategy = MMS_strict);

/// MatchAllMethodDeclarations - Check methods declared in interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                const SelectorSet &ClsMap,
                                SelectorSet &InsMapSeen,
                                SelectorSet &ClsMapSeen,
                                ObjCImplDecl* IMPDecl,
                                ObjCContainerDecl* IDecl,
                                bool &IncompleteImpl,
                                bool ImmediateClass,
                                bool WarnCategoryMethodImpl=false);

/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

/// Returns default addr space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;

private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                         bool receiverIdOrClass,
                                         bool instance);

public:
/// - Returns instance or factory methods in global method pool for
/// given selector. It checks the desired kind first, if none is found, and
/// parameter checkTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, function returns false; otherwise, it
/// returns true.
bool CollectMultipleMethodsInGlobalPool(Selector Sel,
                                        SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                        bool InstanceFirst,
                                        bool CheckTheOther,
                                        const ObjCObjectType *TypeBound = nullptr);

bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                                    SourceRange R, bool receiverIdOrClass,
                                    SmallVectorImpl<ObjCMethodDecl*>& Methods);

void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                        Selector Sel, SourceRange R,
                                        bool receiverIdOrClass);

private:
/// - Returns a selector which best matches given argument list or
/// nullptr if none could be found
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                 bool IsInstance,
                                 SmallVectorImpl<ObjCMethodDecl*>& Methods);

/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  if (RecordFailure)
    TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}

public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/true);
}

/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}

/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);

/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/true);
}

/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/false);
}

const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                          QualType ObjectType=QualType());

/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                       SmallVectorImpl<ObjCIvarDecl*> &Ivars);

//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
  FullExprArg() : E(nullptr) { }
  FullExprArg(Sema &actions) : E(nullptr) { }

  ExprResult release() {
    return E;
  }

  Expr *get() const { return E; }

  Expr *operator->() {
    return E;
  }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  explicit FullExprArg(Expr *expr) : E(expr) {}

  Expr *E;
};

FullExprArg MakeFullExpr(Expr *Arg) {
  return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  return FullExprArg(
      ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  ExprResult FE =
      ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                          /*DiscardedValue*/ true);
  return FullExprArg(FE.get());
}

StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();

StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                         bool HasLeadingEmptyMacro = false);

void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnAfterCompoundStatementLeadingPragmas();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                             ArrayRef<Stmt *> Elts, bool isStmtExpr);

/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
    S.ActOnStartOfCompoundStmt(IsStmtExpr);
  }

  ~CompoundScopeRAII() {
    S.ActOnFinishOfCompoundStmt();
  }

private:
  Sema &S;
};

/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
  Sema &S;
  bool Active;
  FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
  ~FunctionScopeRAII() {
    if (Active)
      S.PopFunctionScopeInfo();
  }
  void disable() { Active = false; }
};

StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc,
                         SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);

ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
                         SourceLocation DotDotDotLoc, ExprResult RHS,
                         SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);

StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
                            SourceLocation ColonLoc,
                            Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
                          SourceLocation ColonLoc, Stmt *SubStmt);

StmtResult BuildAttributedStmt(SourceLocation AttrsLoc,
                               ArrayRef<const Attr *> Attrs,
                               Stmt *SubStmt);
StmtResult ActOnAttributedStmt(const ParsedAttributes &AttrList,
                               Stmt *SubStmt);

class ConditionResult;

StmtResult ActOnIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind,
                       SourceLocation LParenLoc, Stmt *InitStmt,
                       ConditionResult Cond, SourceLocation RParenLoc,
                       Stmt *ThenVal, SourceLocation ElseLoc,
                       Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind,
                       SourceLocation LParenLoc, Stmt *InitStmt,
                       ConditionResult Cond, SourceLocation RParenLoc,
                       Stmt *ThenVal, SourceLocation ElseLoc,
                       Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
                                  SourceLocation LParenLoc, Stmt *InitStmt,
                                  ConditionResult Cond,
                                  SourceLocation RParenLoc);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
                                 Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc,
                          ConditionResult Cond, SourceLocation RParenLoc,
                          Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
                       SourceLocation WhileLoc, SourceLocation CondLParen,
                       Expr *Cond, SourceLocation CondRParen);

StmtResult ActOnForStmt(SourceLocation ForLoc,
                        SourceLocation LParenLoc,
                        Stmt *First,
                        ConditionResult Second,
                        FullExprArg Third,
                        SourceLocation RParenLoc,
                        Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
                                         Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
                                      Stmt *First, Expr *collection,
                                      SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);

enum BuildForRangeKind {
  /// Initial building of a for-range statement.
  BFRK_Build,
  /// Instantiation or recovery rebuild of a for-range statement. Don't
  /// attempt any typo-correction.
  BFRK_Rebuild,
  /// Determining whether a for-range statement could be built. Avoid any
  /// unnecessary or irreversible actions.
  BFRK_Check
};

StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
                                SourceLocation CoawaitLoc,
                                Stmt *InitStmt,
                                Stmt *LoopVar,
                                SourceLocation ColonLoc, Expr *Collection,
                                SourceLocation RParenLoc,
                                BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
                                SourceLocation CoawaitLoc,
                                Stmt *InitStmt,
                                SourceLocation ColonLoc,
                                Stmt *RangeDecl, Stmt *Begin, Stmt *End,
                                Expr *Cond, Expr *Inc,
                                Stmt *LoopVarDecl,
                                SourceLocation RParenLoc,
                                BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);

StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
                         SourceLocation LabelLoc,
                         LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
                                 SourceLocation StarLoc,
                                 Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);

void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
                              CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
                              CapturedRegionKind Kind,
                              ArrayRef<CapturedParamNameType> Params,
                              unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
                                         SourceLocation Loc,
                                         unsigned NumParams);

struct NamedReturnInfo {
  const VarDecl *Candidate;

  enum Status : uint8_t { None, MoveEligible, MoveEligibleAndCopyElidable };
  Status S;

  bool isMoveEligible() const { return S != None; };
  bool isCopyElidable() const { return S == MoveEligibleAndCopyElidable; }
};
enum class SimplerImplicitMoveMode { ForceOff, Normal, ForceOn };
NamedReturnInfo getNamedReturnInfo(
    Expr *&E, SimplerImplicitMoveMode Mode = SimplerImplicitMoveMode::Normal);
NamedReturnInfo getNamedReturnInfo(const VarDecl *VD);
const VarDecl *getCopyElisionCandidate(NamedReturnInfo &Info,
                                       QualType ReturnType);

ExprResult
PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
                                const NamedReturnInfo &NRInfo, Expr *Value,
                                bool SupressSimplerImplicitMoves = false);

StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
                           Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
                           bool AllowRecovery = false);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
                                   NamedReturnInfo &NRInfo,
                                   bool SupressSimplerImplicitMoves);

StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
                           bool IsVolatile, unsigned NumOutputs,
                           unsigned NumInputs, IdentifierInfo **Names,
                           MultiExprArg Constraints, MultiExprArg Exprs,
                           Expr *AsmString, MultiExprArg Clobbers,
                           unsigned NumLabels,
                           SourceLocation RParenLoc);

void FillInlineAsmIdentifierInfo(Expr *Res,
                                 llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
                                     SourceLocation TemplateKWLoc,
                                     UnqualifiedId &Id,
                                     bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
                          unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
                                       SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
                          ArrayRef<Token> AsmToks,
                          StringRef AsmString,
                          unsigned NumOutputs, unsigned NumInputs,
                          ArrayRef<StringRef> Constraints,
                          ArrayRef<StringRef> Clobbers,
                          ArrayRef<Expr*> Exprs,
                          SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
                                 SourceLocation Location,
                                 bool AlwaysCreate);

VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo,
                                QualType ExceptionType,
                                SourceLocation StartLoc,
                                SourceLocation IdLoc,
                                IdentifierInfo *Id,
                                bool Invalid = false);

Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);

StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
                                Decl *Parm, Stmt *Body);

StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);

StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
                              MultiStmtArg Catch, Stmt *Finally);

StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
                                Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
                                          Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
                                       Expr *SynchExpr, Stmt *SynchBody);

StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);

VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
                                   SourceLocation StartLoc,
                                   SourceLocation IdLoc,
                                   IdentifierInfo *Id);

Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);

StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
                              Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
                            ArrayRef<Stmt *> Handlers);

StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
                            SourceLocation TryLoc, Stmt *TryBlock,
                            Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
                               Expr *FilterExpr,
                               Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);

void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);

bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;

/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);

/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S, unsigned DiagID);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);

/// If VD is set but not otherwise used, diagnose, for a parameter or a
/// variable.
void DiagnoseUnusedButSetDecl(const VarDecl *VD);

/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
///   if (condition);
///     do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body,
                           unsigned DiagID);

/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody);

/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
                      SourceLocation OpLoc);

/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
                                         SourceLocation Loc);

/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);

ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
  ParsingClassDepth++;
  return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
  ParsingClassDepth--;
  DelayedDiagnostics.popUndelayed(state);
}

void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                                const ObjCInterfaceDecl *UnknownObjCClass,
                                bool ObjCPropertyAccess,
                                bool AvoidPartialAvailabilityChecks = false,
                                ObjCInterfaceDecl *ClassReceiver = nullptr);

bool makeUnavailableInSystemHeader(SourceLocation loc,
                                   UnavailableAttr::ImplicitReason reason);

/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);

void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);

//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.

bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                       const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
                       bool ObjCPropertyAccess = false,
                       bool AvoidPartialAvailabilityChecks = false,
                       ObjCInterfaceDecl *ClassReciever = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
                                      ObjCMethodDecl *Getter,
                                      SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
                           ArrayRef<Expr *> Args);

void PushExpressionEvaluationContext(
    ExpressionEvaluationContext NewContext,
    Decl *LambdaContextDecl = nullptr,
    ExpressionEvaluationContextRecord::ExpressionKind Type =
        ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
    ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
    ExpressionEvaluationContextRecord::ExpressionKind Type =
        ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();

void DiscardCleanupsInEvaluationContext();

ExprResult TransformToPotentiallyEvaluated(Expr *E);
TypeSourceInfo *TransformToPotentiallyEvaluated(TypeSourceInfo *TInfo);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);

ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);

ExprResult ActOnConstantExpression(ExprResult Res);

// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
                            bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
                                       unsigned CapturingScopeIndex);

ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();

enum TryCaptureKind {
  TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};

/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or
/// complaining if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
                        TryCaptureKind Kind, SourceLocation EllipsisLoc,
                        bool BuildAndDiagnose, QualType &CaptureType,
                        QualType &DeclRefType,
                        const unsigned *const FunctionScopeIndexToStopAt);

/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
                        TryCaptureKind Kind = TryCapture_Implicit,
                        SourceLocation EllipsisLoc = SourceLocation());

/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);

/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);

/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
                                      bool SkipLocalVariables = false,
                                      ArrayRef<const Expr *> StopAt = None);

/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
                          bool ForceComplain = false,
                          bool (*IsPlausibleResult)(QualType) = nullptr);

/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
                   UnresolvedSetImpl &NonTemplateOverloads);

/// Try to convert an expression \p E to type \p Ty. Returns the result of
/// the conversion.
ExprResult tryConvertExprToType(Expr *E, QualType Ty);

/// Conditionally issue a diagnostic based on the statement's reachability
/// analysis.
///
/// \param Stmts If Stmts is non-empty, delay reporting the diagnostic until
/// the function body is parsed, and then do a basic reachability analysis to
/// determine if the statement is reachable. If it is unreachable, the
/// diagnostic will not be emitted.
bool DiagIfReachable(SourceLocation Loc, ArrayRef<const Stmt *> Stmts,
                     const PartialDiagnostic &PD);

/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
                         const PartialDiagnostic &PD);

/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
                         const PartialDiagnostic &PD);

// Primary Expressions.
SourceRange getExprRange(Expr *E) const;

ExprResult ActOnIdExpression(
    Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
    UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
    CorrectionCandidateCallback *CCC = nullptr,
    bool IsInlineAsmIdentifier = false,
    Token *KeywordReplacement = nullptr);

void DecomposeUnqualifiedId(const UnqualifiedId &Id,
                            TemplateArgumentListInfo &Buffer,
                            DeclarationNameInfo &NameInfo,
                            const TemplateArgumentListInfo *&TemplateArgs);

bool DiagnoseDependentMemberLookup(LookupResult &R);

bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
                         CorrectionCandidateCallback &CCC,
                         TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
                         ArrayRef<Expr *> Args = None,
                         TypoExpr **Out = nullptr);

DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
                                  IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);

ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
                              IdentifierInfo *II,
                              bool AllowBuiltinCreation=false);

ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
                                      SourceLocation TemplateKWLoc,
                                      const DeclarationNameInfo &NameInfo,
                                      bool isAddressOfOperand,
                                      const TemplateArgumentListInfo *TemplateArgs);

/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);

DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
                              SourceLocation Loc,
                              const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
                 const DeclarationNameInfo &NameInfo,
                 const CXXScopeSpec *SS = nullptr,
                 NamedDecl *FoundD = nullptr,
                 SourceLocation TemplateKWLoc = SourceLocation(),
                 const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
                 const DeclarationNameInfo &NameInfo,
                 NestedNameSpecifierLoc NNS,
                 NamedDecl *FoundD = nullptr,
                 SourceLocation TemplateKWLoc = SourceLocation(),
                 const TemplateArgumentListInfo *TemplateArgs = nullptr);

ExprResult BuildAnonymousStructUnionMemberReference(
    const CXXScopeSpec &SS,
    SourceLocation nameLoc,
    IndirectFieldDecl *indirectField,
    DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
    Expr *baseObjectExpr = nullptr,
    SourceLocation opLoc = SourceLocation());

ExprResult BuildPossibleImplicitMemberExpr(
    const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R,
    const TemplateArgumentListInfo *TemplateArgs, const Scope *S,
    UnresolvedLookupExpr *AsULE = nullptr);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
                                   SourceLocation TemplateKWLoc,
                                   LookupResult &R,
                                   const TemplateArgumentListInfo *TemplateArgs,
                                   bool IsDefiniteInstance,
                                   const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
                                const LookupResult &R,
                                bool HasTrailingLParen);

ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
                                  const DeclarationNameInfo &NameInfo,
                                  bool IsAddressOfOperand, const Scope *S,
                                  TypeSourceInfo **RecoveryTSI = nullptr);

ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
                                     SourceLocation TemplateKWLoc,
                                     const DeclarationNameInfo &NameInfo,
                                     const TemplateArgumentListInfo *TemplateArgs);

ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R,
                                    bool NeedsADL,
                                    bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
    const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo,
    NamedDecl *D, NamedDecl *FoundD = nullptr,
    const TemplateArgumentListInfo *TemplateArgs = nullptr,
    bool AcceptInvalidDecl = false);

ExprResult BuildLiteralOperatorCall(
    LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args,
    SourceLocation LitEndLoc,
    TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);

ExprResult BuildPredefinedExpr(SourceLocation Loc,
                               PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);

ExprResult BuildSYCLUniqueStableNameExpr(SourceLocation OpLoc,
                                         SourceLocation LParen,
                                         SourceLocation RParen,
                                         TypeSourceInfo *TSI);
ExprResult ActOnSYCLUniqueStableNameExpr(SourceLocation OpLoc,
                                         SourceLocation LParen,
                                         SourceLocation RParen,
                                         ParsedType ParsedTy);

bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);

ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
                                  Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
                              SourceLocation R,
                              MultiExprArg Val);

/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
                              Scope *UDLScope = nullptr);

ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
                                     SourceLocation DefaultLoc,
                                     SourceLocation RParenLoc,
                                     Expr *ControllingExpr,
                                     ArrayRef<ParsedType> ArgTypes,
                                     ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
                                      SourceLocation DefaultLoc,
                                      SourceLocation RParenLoc,
                                      Expr *ControllingExpr,
                                      ArrayRef<TypeSourceInfo *> Types,
                                      ArrayRef<Expr *> Exprs);

// Binary/Unary Operators.  'Tok' is the token for the operator.
// --- Unary operators, type traits, subscripting, and member access ---------
// Declarations for unary-operator construction, sizeof/alignof-family trait
// expressions, array/matrix subscripts, the OpenMP array-section / shaping /
// iterator expressions, and the member-access (. and ->) machinery.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, MultiExprArg ArgExprs, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx, Expr *ColumnIdx, SourceLocation RBLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLocFirst, SourceLocation ColonLocSecond, Expr *Length, Expr *Stride, SourceLocation RBLoc); ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc, SourceLocation RParenLoc, ArrayRef<Expr *> Dims, ArrayRef<SourceRange>
Brackets); /// Data structure for iterator expression. struct OMPIteratorData { IdentifierInfo *DeclIdent = nullptr; SourceLocation DeclIdentLoc; ParsedType Type; OMPIteratorExpr::IteratorRange Range; SourceLocation AssignLoc; SourceLocation ColonLoc; SourceLocation SecColonLoc; }; ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc, SourceLocation LLoc, SourceLocation RLoc, ArrayRef<OMPIteratorData> Data); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow,
SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations.
// --- Call expressions, atomic builtins, CUDA exec config, C-style casts ----
// Declarations for building call expressions (including __atomic_* builtins
// via BuildAtomicExpr and CUDA <<<...>>> execution-configuration calls) and
// for C-style cast handling.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr); ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false, bool AllowRecovery = false); Expr *BuildBuiltinCallExpr(SourceLocation Loc, Builtin::ID Id, MultiExprArg CallArgs); enum class AtomicArgumentOrder { API, AST }; ExprResult BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, SourceLocation RParenLoc, MultiExprArg Args, AtomicExpr::AtomicOp Op, AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an altivec or OpenCL literal.
// --- Vector/compound literals, init lists, binary and conditional ops ------
// Declarations for vector and compound literals, brace-initializer lists and
// designated initializers, plus the binary-operator and ?: building entry
// points. The private ConvertTokenKindToBinaryOpcode helper maps parser
// tokens to BinaryOperatorKind; note the access-specifier change is part of
// the original text.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation EqualOrColonLoc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, UnresolvedSetImpl &Functions); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of a the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
// --- GNU address-of-label, statement expressions, and builtin expressions --
// Declarations for &&label, GNU ({...}) statement expressions, and the
// __builtin_offsetof / __builtin_choose_expr / __builtin_va_arg and
// source-location builtins. OffsetOfComponent describes one step of an
// offsetof designator (either .ident or [expr], discriminated by isBrackets).
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc, unsigned TemplateDepth); // Handle the final expression in a statement expression. ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN(), __builtin_source_location() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr.
// --- __null, case checks, Microsoft __if_exists, and block literals --------
// Declarations for the GNU __null expression, case-expression checking, the
// Microsoft __if_exists/__if_not_exists machinery (IfExistsResult enumerates
// the four possible outcomes of the symbol check), and the parser callbacks
// for Apple "Blocks" literals (^{...}).
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, QualType ResultTy, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed.
// --- Blocks (cont.), Clang/OpenCL extensions, namespaces, <=> categories ---
// Declarations for __builtin_convertvector / __builtin_astype, namespace
// definition handling (including lazy access to the std namespace and cached
// lookups such as std::bad_alloc and std::align_val_t), the C++20 comparison
// category support used by operator<=>, and std::initializer_list helpers.
// FullyCheckedComparisonCategories is indexed by ComparisonCategoryType.
^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); ExprResult BuildAsTypeExpr(Expr *E, QualType DestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); NamespaceDecl *lookupStdExperimentalNamespace(); NamespaceDecl *getCachedCoroNamespace() { return CoroTraitsNamespaceCache; } CXXRecordDecl *getStdBadAlloc() const; EnumDecl *getStdAlignValT() const; private: // A cache representing if we've fully checked the various comparison category // types stored in ASTContext. The bit-index corresponds to the integer value // of a ComparisonCategoryType enumerator. llvm::SmallBitVector FullyCheckedComparisonCategories; ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl, CXXScopeSpec &SS, ParsedType TemplateTypeTy, IdentifierInfo *MemberOrBase); public: enum class ComparisonCategoryUsage { /// The '<=>' operator was used in an expression and a builtin operator /// was selected. OperatorInExpression, /// A defaulted 'operator<=>' needed the comparison category.
This /// typically only applies to 'std::strong_ordering', due to the implicit /// fallback return value. DefaultedOperator, }; /// Lookup the specified comparison category types in the standard /// library, an check the VarDecls possibly returned by the operator<=> /// builtins for that type. /// /// \return The type of the comparison category type corresponding to the /// specified Kind, or a null type if an error occurs QualType CheckComparisonCategoryType(ComparisonCategoryType Kind, SourceLocation Loc, ComparisonCategoryUsage Usage); /// Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2.
// --- Using-directives, namespace aliases, and using-declarations -----------
// Declarations for handling 'using namespace', 'namespace X = Y', and
// using-declarations / using-enum-declarations, including shadow-declaration
// creation, redeclaration checks, and pack expansion of using declarations.
bool isInitListConstructor(const FunctionDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, const ParsedAttributesView &AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void FilterUsingLookup(Scope *S, LookupResult &lookup); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(BaseUsingDecl *BUD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, BaseUsingDecl *BUD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc, const LookupResult *R = nullptr, const UsingDecl *UD = nullptr); NamedDecl *BuildUsingDeclaration( Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList, bool IsInstantiation, bool IsUsingIfExists); NamedDecl *BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation EnumLoc, SourceLocation NameLoc, EnumDecl *ED); NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom, ArrayRef<NamedDecl *> Expansions); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); /// Given a derived-class using shadow declaration for a constructor and the /// corresponding base class constructor, find or create
// --- Inheriting constructors, alias declarations, construct expressions ----
// Declarations for resolving inherited constructors, the ActOn* entry points
// for using/using-enum/alias declarations, and the first two overloads of
// BuildCXXConstructExpr (the third overload begins on the following original
// line).
the implicit /// synthesized derived class constructor to use for this initialization. CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList); Decl *ActOnUsingEnumDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation EnumLoc, const DeclSpec &); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor. ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable?
// --- Construct expressions (cont.), default args, implicit exception specs -
// The final BuildCXXConstructExpr overload, default-argument handling, and
// the ImplicitExceptionSpecification helper class, which accumulates the
// exception specification implied by the functions an implicitly-declared
// special member calls (see its getExceptionSpec() for the EST_None ->
// noexcept(false) mapping per C++11 [except.spec]p14).
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const { assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// Integrate an invoked expression into the collected data. void CalledExpr(Expr *E) { CalledStmt(E); } /// Integrate an invoked statement into the collected data. void CalledStmt(Stmt *S); /// Overwrite an EPI's exception specification with this /// computed exception specification. FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_NoexceptFalse; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results.
// --- Exception-specification checking and deleted/implicit members ---------
// Declarations for validating parsed exception specifications (including the
// documented libstdc++ eager-parse workaround), applying delayed exception
// specs to methods, determining whether a defaulted special member must be
// deleted, and declaring/defining the implicit default constructor.
void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Produce notes explaining why a defaulted function was defined as deleted. void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added.
// --- Implicit destructor, inheriting/copy/move constructors ----------------
// Declare/Define pairs for the implicit destructor, copy constructor, and
// move constructor, plus destructor exception-spec adjustment and defining
// inheriting constructors.
/// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class.
// --- Implicit assignment operators and 'this'-in-static checks -------------
// Declare/Define pairs for the implicit copy and move assignment operators,
// forcing declaration of implicit members, and diagnostics for 'this'
// appearing where a static member function's type/spec/attributes are formed.
/// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// Whether 'this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred.
// --- Temporaries, immediate invocation, special-name lookup, AltiVec -------
// Declarations for wrapping temporaries (MaybeBindToTemporary), immediate
// (consteval) invocation checking, resolving constructor/destructor names
// during parsing, and the reinterpret_cast / AltiVec-initialization checks.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. ExprResult MaybeBindToTemporary(Expr *E); /// Wrap the expression in a ConstantExpr if it is a potential immediate /// invocation. ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, QualType DeclInitType, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr *> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); // Checks that the vector type should be initialized from a scalar // by splatting the value rather than populating a single element. // This is the case for AltiVecVector types as well as with // AltiVecPixel and AltiVecBool when -faltivec-src-compat=xl is specified. bool ShouldSplatAltivecScalarInCast(const VectorType *VecTy); // Checks if the -faltivec-src-compat=gcc option is specified. // If so, AltiVecVector, AltiVecBool and AltiVecPixel types are // treated the same way as they are when trying to initialize // these vectors on gcc (an error is emitted).
bool CheckAltivecInitFromScalar(SourceRange R, QualType VecTy, QualType SrcTy); /// ActOnCXXNamedCast - Parse /// {dynamic,static,reinterpret,const,addrspace}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). 
ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS,
                            tok::TokenKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee,
                            SourceLocation LParenLoc, Expr *LHS,
                            BinaryOperatorKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc,
                            Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
                                 BinaryOperatorKind Operator);

/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);

/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);

/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();

/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;

/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
  Sema &S;
  QualType OldCXXThisTypeOverride;
  bool Enabled;

public:
  /// Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class) along with the given qualifiers placed on '*this'.
  CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
                   bool Enabled = true);

  ~CXXThisScopeRAII();
};

/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
/// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \return returns 'true' if failed, 'false' if success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. 
/// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. 
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? 
Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); // Complete an enum decl, maybe without a scope spec. bool RequireCompleteEnumDecl(EnumDecl *D, SourceLocation L, CXXScopeSpec *SS = nullptr); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. 
IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. 
/// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. 
/// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. 
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, unsigned LambdaDependencyKind, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Expr *TrailingRequiresClause); /// Number lambda for linkage purposes if necessary. void handleLambdaNumbering( CXXRecordDecl *Class, CXXMethodDecl *Method, Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. 
ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc, ExprResult RequiresClause); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. 
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType, CallingConv CC); /// Define the "body" of the conversion from a lambda object to a /// function pointer. 
/// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); /// Check whether the given expression is a valid constraint expression. /// A diagnostic is emitted if it is not, false is returned, and /// PossibleNonPrimary will be set to true if the failure might be due to a /// non-primary expression being used as an atomic constraint. bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(), bool *PossibleNonPrimary = nullptr, bool IsTrailingRequiresClause = false); private: /// Caches pairs of template-like decls whose associated constraints were /// checked for subsumption and whether or not the first's constraints did in /// fact subsume the second's. llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache; /// Caches the normalized associated constraints of declarations (concepts or /// constrained declarations). If an error occurred while normalizing the /// associated constraints of the template or concept, nullptr will be cached /// here. 
llvm::DenseMap<NamedDecl *, NormalizedConstraint *> NormalizationCache;

llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
    SatisfactionCache;

/// Introduce the instantiated function parameters into the local
/// instantiation scope, and set the parameter names to those used
/// in the template.
bool addInstantiatedParametersToScope(
    FunctionDecl *Function, const FunctionDecl *PatternDecl,
    LocalInstantiationScope &Scope,
    const MultiLevelTemplateArgumentList &TemplateArgs);

public:
const NormalizedConstraint *
getNormalizedAssociatedConstraints(
    NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);

/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives the result of true if D1 is
/// at least as constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
                            NamedDecl *D2, ArrayRef<const Expr *> AC2,
                            bool &Result);

/// If D1 was not at least as constrained as D2, but would've been if a pair
/// of atomic constraints involved had been declared in a concept and not
/// repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
    ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);

/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the /// constraint expression. /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// \param Satisfaction if true is returned, will contain details of the /// satisfaction, with enough information to diagnose an unsatisfied /// expression. /// \returns true if an error occurred and satisfaction could not be checked, /// false otherwise. bool CheckConstraintSatisfaction( const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction); /// \brief Check whether the given non-dependent constraint expression is /// satisfied. Returns false and updates Satisfaction with the satisfaction /// verdict if successful, emits a diagnostic and returns true if an error /// occurred and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckConstraintSatisfaction(const Expr *ConstraintExpr, ConstraintSatisfaction &Satisfaction); /// Check whether the given function decl's trailing requires clause is /// satisfied, if any. Returns false and updates Satisfaction with the /// satisfaction verdict if successful, emits a diagnostic and returns true if /// an error occurred and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckFunctionConstraints(const FunctionDecl *FD, ConstraintSatisfaction &Satisfaction, SourceLocation UsageLoc = SourceLocation()); /// \brief Ensure that the given template arguments satisfy the constraints /// associated with the given template, emitting a diagnostic if they do not. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateArgs The converted, canonicalized template arguments. 
/// /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// /// \returns true if the constrains are not satisfied or could not be checked /// for satisfaction, false if the constraints are satisfied. bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. /// \param First whether this is the first time an unsatisfied constraint is /// diagnosed for this error. void DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction, bool First = true); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. void DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction, bool First = true); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. 
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const 
ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. 
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// Mark destructors of virtual bases of this class referenced. In the Itanium /// C++ ABI, this is done when emitting a destructor for any non-abstract /// class. In the Microsoft C++ ABI, this is done any time a class's /// destructor is referenced. void MarkVirtualBaseDestructorsReferenced( SourceLocation Location, CXXRecordDecl *ClassDecl, llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr); /// Do semantic checks to allow the complete destructor variant to be emitted /// when the destructor is defined in another translation unit. In the Itanium /// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they /// can be emitted in separate TUs. To emit the complete variant, run a subset /// of the checks performed when emitting a regular destructor. void CheckCompleteDestructorVariant(SourceLocation CurrentLocation, CXXDestructorDecl *Dtor); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. 
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called some point later /// when all outer classes of Class are complete. void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); /// Add gsl::Pointer attribute to std::container::iterator /// \param ND The declaration that introduces the name /// std::container::iterator. \param UnderlyingRecord The record named by ND. void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord); /// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types. void inferGslOwnerPointerAttribute(CXXRecordDecl *Record); /// Add [[gsl::Pointer]] attributes for std:: types. void inferGslPointerAttribute(TypedefNameDecl *TD); void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record); /// Check that the C++ class annoated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. 
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Decl *Template, llvm::function_ref<Scope *()> EnterScope); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl 
*ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD); bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM); void CheckDelayedMemberExceptionSpecs(); bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD, DefaultedComparisonKind DCK); void DeclareImplicitEqualityComparison(CXXRecordDecl *RD, FunctionDecl *Spaceship); void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD, DefaultedComparisonKind DCK); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, const ParsedAttributesView &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbiguousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, const SourceRange &, DeclAccessPair FoundDecl); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, ArrayRef<Expr *> ArgExprs, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult 
CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived,
                     const CXXBasePath &Path, unsigned DiagID,
                     bool ForceCheck = false, bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
                        QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
                                   DeclAccessPair Found, QualType ObjectType,
                                   SourceLocation Loc,
                                   const PartialDiagnostic &Diag);
/// Convenience overload: same accessibility check, but with no diagnostic
/// location and a default-constructed suppressed diagnostic.
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
                                   DeclAccessPair Found, QualType ObjectType) {
  return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
                                       SourceLocation(), PDiag());
}
void HandleDependentAccessCheck(
    const DependentDiagnostic &DD,
    const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(
    const DeclContext *Pattern,
    const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);

/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;

/// Selects the kind of entity being diagnosed as having an abstract class
/// type. NOTE(review): presumably these index a %select in the diagnostic
/// text — values must stay in sync with the corresponding .td entry.
enum AbstractDiagSelID {
  AbstractNone = -1, // sentinel: no entity-kind selector applies
  AbstractReturnType,
  AbstractParamType,
  AbstractVariableType,
  AbstractFieldType,
  AbstractIvarType,
  AbstractSynthesizedIvarType,
  AbstractArrayType
};

bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
                            TypeDiagnoser &Diagnoser);
/// Variadic convenience wrapper: binds DiagID and the extra arguments into a
/// BoundTypeDiagnoser and forwards to the TypeDiagnoser overload above.
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireNonAbstractType(Loc, T, Diagnoser);
}

void DiagnoseAbstractType(const CXXRecordDecl *RD);

//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);

//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
                                   bool AllowFunctionTemplates = true,
                                   bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
                                   bool AllowFunctionTemplates = true,
                                   bool AllowDependent = true,
                                   bool AllowNonTemplateFunctions = false);

/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
///        considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
///        name templates) should be considered valid results.
static NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
                                        bool AllowFunctionTemplates = true,
                                        bool AllowDependent = true);

/// Tag type used to request the unconditionally-required state of
/// RequiredTemplateKind below.
enum TemplateNameIsRequiredTag { TemplateNameIsRequired };

/// Whether and why a template name is required in this lookup.
class RequiredTemplateKind {
public:
  /// Template name is required if TemplateKWLoc is valid.
  RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
      : TemplateKW(TemplateKWLoc) {}

  /// Template name is unconditionally required.
  /// (Leaves TemplateKW disengaged, which isRequired() treats as "required".)
  RequiredTemplateKind(TemplateNameIsRequiredTag) {}

  /// The stored keyword location, or an invalid location when TemplateKW is
  /// disengaged.
  SourceLocation getTemplateKeywordLoc() const {
    return TemplateKW.getValueOr(SourceLocation());
  }

  bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }

  /// True when TemplateKW is disengaged (unconditionally required) or holds a
  /// valid keyword location; an engaged-but-invalid location compares equal
  /// to SourceLocation() and yields false.
  bool isRequired() const { return TemplateKW != SourceLocation(); }

  explicit operator bool() const { return isRequired(); }

private:
  llvm::Optional<SourceLocation> TemplateKW;
};

enum class AssumedTemplateKind {
  /// This is not assumed to be a template name.
  None,
  /// This is assumed to be a template name because lookup found nothing.
  FoundNothing,
  /// This is assumed to be a template name because lookup found one or more
  /// functions (but no function templates).
  FoundFunctions,
};

bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
                        QualType ObjectType, bool EnteringContext,
                        bool &MemberOfUnknownSpecialization,
                        RequiredTemplateKind RequiredTemplate = SourceLocation(),
                        AssumedTemplateKind *ATK = nullptr,
                        bool AllowTypoCorrection = true);

TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS,
                                bool hasTemplateKeyword,
                                const UnqualifiedId &Name,
                                ParsedType ObjectType, bool EnteringContext,
                                TemplateTy &Template,
                                bool &MemberOfUnknownSpecialization,
                                bool Disambiguation = false);

/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
                                     TemplateNameKind &TNK,
                                     SourceLocation NameLoc,
                                     IdentifierInfo *&II);

bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
                                      SourceLocation NameLoc,
                                      bool Diagnose = true);

/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg, bool HasTypeConstraint); bool ActOnTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool BuildTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc, bool AllowUnexpandedPack); bool AttachTypeConstraint(NestedNameSpecifierLoc NS, DeclarationNameInfo NameInfo, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(AutoTypeLoc TL, NonTypeTemplateParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool RequireStructuralType(QualType T, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); 
NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid, bool SuppressDiagnostic = false); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); /// Get a template argument mapping the given template parameter to itself, /// e.g. 
for X in \c template<int X>, this would return an expression template /// argument referencing X. TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param, SourceLocation Location); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); /// Get the specialization of the given variable template corresponding to /// the specified argument list, or a null-but-valid result if the arguments /// are dependent. DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); /// Form a reference to the specialization of the given variable template /// corresponding to the specified argument list, or a null-but-valid result /// if the arguments are dependent. 
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &ConceptNameInfo, NamedDecl *FoundDecl, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, CXXScopeSpec &SS, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation 
PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. 
CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \param ConstraintsNotSatisfied If provided, and an error occurred, will /// receive true if the cause for the error is the associated constraints of /// the template not being satisfied by the template arguments. /// /// \returns true if an error occurred, false otherwise. 
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true, bool *ConstraintsNotSatisfied = nullptr); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param, TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. 
/// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, TypeSourceInfo **TSI, bool DeducedTSTContext); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, bool DeducedTSTContext = true); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Concepts //===--------------------------------------------------------------------===// Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); RequiresExprBodyDecl * ActOnStartRequiresExpr(SourceLocation RequiresKWLoc, ArrayRef<ParmVarDecl *> LocalParameters, Scope *BodyScope); void ActOnFinishRequiresExpr(); concepts::Requirement *ActOnSimpleRequirement(Expr *E); concepts::Requirement *ActOnTypeRequirement( SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo *TypeName, TemplateIdAnnotation 
*TemplateId); concepts::Requirement *ActOnCompoundRequirement(Expr *E, SourceLocation NoexceptLoc); concepts::Requirement * ActOnCompoundRequirement( Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, unsigned Depth); concepts::Requirement *ActOnNestedRequirement(Expr *Constraint); concepts::ExprRequirement * BuildExprRequirement( Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::ExprRequirement * BuildExprRequirement( concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type); concepts::TypeRequirement * BuildTypeRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); concepts::NestedRequirement *BuildNestedRequirement(Expr *E); concepts::NestedRequirement * BuildNestedRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc, RequiresExprBodyDecl *Body, ArrayRef<ParmVarDecl *> LocalParameters, ArrayRef<concepts::Requirement *> Requirements, SourceLocation ClosingBraceLoc); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. 
/// \param Loc The source location where a diagnostic should be emitted.
/// If the given requires-expression contains an unexpanded reference to one
  /// of its own parameter packs, diagnose the error.
  ///
  /// \param RE The requires-expression that is being checked for unexpanded
  /// parameter packs.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. 
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. 
/// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. 
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. 
/// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. 
/// A non-dependent component of the parameter did not match the
    /// corresponding component of the argument.
/// \brief A function argument from which we performed template argument
  /// deduction for a call.
QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); // Substitute auto in TypeWithAuto for a Dependent auto type QualType SubstAutoTypeDependent(QualType TypeWithAuto); // Substitute auto in TypeWithAuto for a Dependent auto type TypeSourceInfo * SubstAutoTypeSourceInfoDependent(TypeSourceInfo *TypeWithAuto); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. 
enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, const AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate( FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2, bool Reversed = false); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, 
SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs( const NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). 
/// We are substituting template arguments determined as part of
      /// template argument deduction for either a class template
      /// partial specialization or a function template. The
      /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
      /// a TemplateDecl.
DeclaringSpecialMember, /// We are declaring an implicit 'operator==' for a defaulted /// 'operator<=>'. DeclaringImplicitEqualityComparison, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, // We are checking the constraints associated with a constrained entity or // the constraint expression of a concept. This includes the checks that // atomic constraints have the type 'bool' and that they can be constant // evaluated. ConstraintsCheck, // We are substituting template arguments into a constraint expression. ConstraintSubstitution, // We are normalizing a constraint expression. ConstraintNormalization, // We are substituting into the parameter mapping of an atomic constraint // during normalization. ParameterMappingSubstitution, /// We are rewriting a comparison operator in terms of an operator<=>. RewritingOperatorAsSpaceship, /// We are initializing a structured binding. InitializingStructuredBinding, /// We are marking a class as __dllexport. MarkingClassDllexported, /// Added for Template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. 
    const TemplateArgument *TemplateArgs;

    // FIXME: Wrap this union around more members, or perhaps store the
    // kind-specific members in the RAII object owning the context.
    union {
      /// The number of template arguments in TemplateArgs.
      unsigned NumTemplateArgs;

      /// The special member being declared or defined.
      CXXSpecialMember SpecialMember;
    };

    /// View the template arguments as an ArrayRef. Only valid for kinds
    /// that store TemplateArgs/NumTemplateArgs (not DeclaringSpecialMember,
    /// which uses the other union member).
    ArrayRef<TemplateArgument> template_arguments() const {
      assert(Kind != DeclaringSpecialMember);
      return {TemplateArgs, NumTemplateArgs};
    }

    /// The template deduction info object associated with the
    /// substitution or checking of explicit or deduced template arguments.
    sema::TemplateDeductionInfo *DeductionInfo;

    /// The source range that covers the construct that causes
    /// the instantiation, e.g., the template-id that causes a class
    /// template instantiation.
    SourceRange InstantiationRange;

    CodeSynthesisContext()
        : Kind(TemplateInstantiation),
          SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
          Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
          DeductionInfo(nullptr) {}

    /// Determines whether this template is an actual instantiation
    /// that should be counted toward the maximum instantiation depth.
    bool isInstantiationRecord() const;
  };

  /// List of active code synthesis contexts.
  ///
  /// This vector is treated as a stack. As synthesis of one entity requires
  /// synthesis of another, additional contexts are pushed onto the stack.
  SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;

  /// Specializations whose definitions are currently being instantiated.
  llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;

  /// Non-dependent types used in templates that have already been instantiated
  /// by some template instantiation.
  llvm::DenseSet<QualType> InstantiatedNonDependentTypes;

  /// Extra modules inspected when performing a lookup during a template
  /// instantiation. Computed lazily.
  SmallVector<Module*, 16> CodeSynthesisContextLookupModules;

  /// Cache of additional modules that should be used for name lookup
  /// within the current template instantiation. Computed lazily; use
  /// getLookupModules() to get a complete set.
  llvm::DenseSet<Module*> LookupModulesCache;

  /// Get the set of additional modules that should be checked during
  /// name lookup. A module and its imports become visible when instantiating a
  /// template defined within it.
  llvm::DenseSet<Module*> &getLookupModules();

  /// Map from the most recent declaration of a namespace to the most
  /// recent visible declaration of that namespace.
  llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;

  /// Whether we are in a SFINAE context that is not associated with
  /// template instantiation.
  ///
  /// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
  /// of a template instantiation or template argument deduction.
  bool InNonInstantiationSFINAEContext;

  /// The number of \p CodeSynthesisContexts that are not template
  /// instantiations and, therefore, should not be counted as part of the
  /// instantiation depth.
  ///
  /// When the instantiation depth reaches the user-configurable limit
  /// \p LangOptions::InstantiationDepth we will abort instantiation.
  // FIXME: Should we have a similar limit for other forms of synthesis?
  unsigned NonInstantiationEntries;

  /// The depth of the context stack at the point when the most recent
  /// error or warning was produced.
  ///
  /// This value is used to suppress printing of redundant context stacks
  /// when there are multiple errors or warnings in the same instantiation.
  // FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
  unsigned LastEmittedCodeSynthesisContextDepth = 0;

  /// The template instantiation callbacks to trace or track
  /// instantiations (objects can be chained).
  ///
  /// These callbacks are used to print, trace or track template
  /// instantiations as they are being constructed.
  std::vector<std::unique_ptr<TemplateInstantiationCallback>>
      TemplateInstCallbacks;

  /// The current index into pack expansion arguments that will be
  /// used for substitution of parameter packs.
  ///
  /// The pack expansion index will be -1 to indicate that parameter packs
  /// should be instantiated as themselves. Otherwise, the index specifies
  /// which argument within the parameter pack will be used for substitution.
  int ArgumentPackSubstitutionIndex;

  /// RAII object used to change the argument pack substitution index
  /// within a \c Sema object.
  ///
  /// See \c ArgumentPackSubstitutionIndex for more information.
  class ArgumentPackSubstitutionIndexRAII {
    Sema &Self;
    int OldSubstitutionIndex;

  public:
    /// Install \p NewSubstitutionIndex, remembering the previous value.
    ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
        : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
      Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
    }

    /// Restore the index that was in effect on construction.
    ~ArgumentPackSubstitutionIndexRAII() {
      Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
    }
  };

  // NOTE(review): this names 'ArgumentPackSubstitutionRAII', not the
  // 'ArgumentPackSubstitutionIndexRAII' class above — confirm which class
  // is intended to be befriended.
  friend class ArgumentPackSubstitutionRAII;

  /// For each declaration that involved template argument deduction, the
  /// set of diagnostics that were suppressed during that template argument
  /// deduction.
  ///
  /// FIXME: Serialize this structure to the AST file.
  typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
    SuppressedDiagnosticsMap;
  SuppressedDiagnosticsMap SuppressedDiagnostics;

  /// A stack object to be created when performing template
  /// instantiation.
  ///
  /// Construction of an object of type \c InstantiatingTemplate
  /// pushes the current instantiation onto the stack of active
  /// instantiations. If the size of this stack exceeds the maximum
  /// number of recursive template instantiations, construction
  /// produces an error and evaluates true.
  ///
  /// Destruction of this object will pop the named instantiation off
  /// the stack.
  struct InstantiatingTemplate {
    /// Note that we are instantiating a class template,
    /// function template, variable template, alias template,
    /// or a member thereof.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          Decl *Entity,
                          SourceRange InstantiationRange = SourceRange());

    /// Tag type selecting the exception-specification constructor below.
    struct ExceptionSpecification {};
    /// Note that we are instantiating an exception specification
    /// of a function template.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          FunctionDecl *Entity, ExceptionSpecification,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are instantiating a default argument in a
    /// template-id.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          TemplateParameter Param, TemplateDecl *Template,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are substituting either explicitly-specified or
    /// deduced template arguments during function template argument deduction.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          FunctionTemplateDecl *FunctionTemplate,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          CodeSynthesisContext::SynthesisKind Kind,
                          sema::TemplateDeductionInfo &DeductionInfo,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are instantiating as part of template
    /// argument deduction for a class template declaration.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          TemplateDecl *Template,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          sema::TemplateDeductionInfo &DeductionInfo,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are instantiating as part of template
    /// argument deduction for a class template partial
    /// specialization.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          ClassTemplatePartialSpecializationDecl *PartialSpec,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          sema::TemplateDeductionInfo &DeductionInfo,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are instantiating as part of template
    /// argument deduction for a variable template partial
    /// specialization.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          VarTemplatePartialSpecializationDecl *PartialSpec,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          sema::TemplateDeductionInfo &DeductionInfo,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are instantiating a default argument for a function
    /// parameter.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          ParmVarDecl *Param,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are substituting prior template arguments into a
    /// non-type parameter.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          NamedDecl *Template,
                          NonTypeTemplateParmDecl *Param,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          SourceRange InstantiationRange);

    /// Note that we are substituting prior template arguments into a
    /// template template parameter.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          NamedDecl *Template,
                          TemplateTemplateParmDecl *Param,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          SourceRange InstantiationRange);

    /// Note that we are checking the default template argument
    /// against the template parameter for a given template-id.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          TemplateDecl *Template,
                          NamedDecl *Param,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          SourceRange InstantiationRange);

    /// Tag type selecting the constraints-check constructor below.
    struct ConstraintsCheck {};
    /// \brief Note that we are checking the constraints associated with some
    /// constrained entity (a concept declaration or a template with associated
    /// constraints).
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          ConstraintsCheck, NamedDecl *Template,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          SourceRange InstantiationRange);

    /// Tag type selecting the constraint-substitution constructor below.
    struct ConstraintSubstitution {};
    /// \brief Note that we are checking a constraint expression associated
    /// with a template declaration or as part of the satisfaction check of a
    /// concept.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          ConstraintSubstitution, NamedDecl *Template,
                          sema::TemplateDeductionInfo &DeductionInfo,
                          SourceRange InstantiationRange);

    /// Tag type selecting the constraint-normalization constructor below.
    struct ConstraintNormalization {};
    /// \brief Note that we are normalizing a constraint expression.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          ConstraintNormalization, NamedDecl *Template,
                          SourceRange InstantiationRange);

    /// Tag type selecting the parameter-mapping constructor below.
    struct ParameterMappingSubstitution {};
    /// \brief Note that we are substituting into the parameter mapping of an
    /// atomic constraint during constraint normalization.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          ParameterMappingSubstitution, NamedDecl *Template,
                          SourceRange InstantiationRange);

    /// \brief Note that we are substituting template arguments into a part of
    /// a requirement of a requires expression.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          concepts::Requirement *Req,
                          sema::TemplateDeductionInfo &DeductionInfo,
                          SourceRange InstantiationRange = SourceRange());

    /// \brief Note that we are checking the satisfaction of the constraint
    /// expression inside of a nested requirement.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          concepts::NestedRequirement *Req, ConstraintsCheck,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we have finished instantiating this template.
    void Clear();

    ~InstantiatingTemplate() { Clear(); }

    /// Determines whether we have exceeded the maximum
    /// recursive template instantiations.
    bool isInvalid() const { return Invalid; }

    /// Determine whether we are already instantiating this
    /// specialization in some surrounding active instantiation.
    bool isAlreadyInstantiating() const { return AlreadyInstantiating; }

  private:
    Sema &SemaRef;
    bool Invalid;
    bool AlreadyInstantiating;
    bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                                 SourceRange InstantiationRange);

    // Shared implementation delegated to by the public constructors above.
    InstantiatingTemplate(
        Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
        SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
        Decl *Entity, NamedDecl *Template = nullptr,
        ArrayRef<TemplateArgument> TemplateArgs = None,
        sema::TemplateDeductionInfo *DeductionInfo = nullptr);

    InstantiatingTemplate(const InstantiatingTemplate&) = delete;

    InstantiatingTemplate&
    operator=(const InstantiatingTemplate&) = delete;
  };

  void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
  void popCodeSynthesisContext();

  /// Determine whether we are currently performing template instantiation.
  bool inTemplateInstantiation() const {
    return CodeSynthesisContexts.size() > NonInstantiationEntries;
  }

  /// Print the current synthesis-context stack (and pragma-attribute
  /// instantiation point, if any), unless the stack has already been
  /// printed at this depth since the last diagnostic.
  void PrintContextStack() {
    if (!CodeSynthesisContexts.empty() &&
        CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
      PrintInstantiationStack();
      LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
    }
    if (PragmaAttributeCurrentTargetDecl)
      PrintPragmaAttributeInstantiationPoint();
  }
  void PrintInstantiationStack();

  void PrintPragmaAttributeInstantiationPoint();

  /// Determines whether we are currently in a context where
  /// template argument substitution failures are not considered
  /// errors.
  ///
  /// \returns An empty \c Optional if we're not in a SFINAE context.
  /// Otherwise, contains a pointer that, if non-NULL, contains the nearest
  /// template-deduction context object, which can be used to capture
  /// diagnostics that will be suppressed.
  Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;

  /// Determines whether we are currently in a context that
  /// is not evaluated as per C++ [expr] p5.
  bool isUnevaluatedContext() const {
    assert(!ExprEvalContexts.empty() &&
           "Must be in an expression evaluation context");
    return ExprEvalContexts.back().isUnevaluated();
  }

  /// Determines whether the innermost expression evaluation context is
  /// an immediate function context (e.g. a consteval call).
  bool isImmediateFunctionContext() const {
    assert(!ExprEvalContexts.empty() &&
           "Must be in an expression evaluation context");
    return ExprEvalContexts.back().isImmediateFunctionContext();
  }

  /// RAII class used to determine whether SFINAE has
  /// trapped any errors that occur during template argument
  /// deduction.
  class SFINAETrap {
    Sema &SemaRef;
    unsigned PrevSFINAEErrors;
    bool PrevInNonInstantiationSFINAEContext;
    bool PrevAccessCheckingSFINAE;
    bool PrevLastDiagnosticIgnored;

  public:
    /// Snapshot the SFINAE-related state so the destructor can restore it.
    /// If we are not already in a SFINAE context, mark this as a
    /// non-instantiation SFINAE context for the duration of the trap.
    explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
        : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
          PrevInNonInstantiationSFINAEContext(
              SemaRef.InNonInstantiationSFINAEContext),
          PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
          PrevLastDiagnosticIgnored(
              SemaRef.getDiagnostics().isLastDiagnosticIgnored())
    {
      if (!SemaRef.isSFINAEContext())
        SemaRef.InNonInstantiationSFINAEContext = true;
      SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
    }

    /// Restore all state captured at construction.
    ~SFINAETrap() {
      SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
      SemaRef.InNonInstantiationSFINAEContext
        = PrevInNonInstantiationSFINAEContext;
      SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
      SemaRef.getDiagnostics().setLastDiagnosticIgnored(
          PrevLastDiagnosticIgnored);
    }

    /// Determine whether any SFINAE errors have been trapped.
    bool hasErrorOccurred() const {
      return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
    }
  };

  /// RAII class used to indicate that we are performing provisional
  /// semantic analysis to determine the validity of a construct, so
  /// typo-correction and diagnostics in the immediate context (not within
  /// implicitly-instantiated templates) should be suppressed.
  class TentativeAnalysisScope {
    Sema &SemaRef;
    // FIXME: Using a SFINAETrap for this is a hack.
    SFINAETrap Trap;
    bool PrevDisableTypoCorrection;
  public:
    explicit TentativeAnalysisScope(Sema &SemaRef)
        : SemaRef(SemaRef), Trap(SemaRef, true),
          PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
      SemaRef.DisableTypoCorrection = true;
    }
    ~TentativeAnalysisScope() {
      SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
    }
  };

  /// The current instantiation scope used to store local
  /// variables.
  LocalInstantiationScope *CurrentInstantiationScope;

  /// Tracks whether we are in a context where typo correction is
  /// disabled.
bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation), /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. 
  SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;

  /// RAII scope that (when enabled) saves aside the global queues of pending
  /// instantiations and vtable uses, so that work queued within the scope can
  /// be performed via perform() and the outer queues restored on destruction.
  class GlobalEagerInstantiationScope {
  public:
    GlobalEagerInstantiationScope(Sema &S, bool Enabled)
        : S(S), Enabled(Enabled) {
      if (!Enabled) return;

      SavedPendingInstantiations.swap(S.PendingInstantiations);
      SavedVTableUses.swap(S.VTableUses);
    }

    /// Define any vtables that became used and perform the instantiations
    /// queued within this scope.
    void perform() {
      if (Enabled) {
        S.DefineUsedVTables();
        S.PerformPendingInstantiations();
      }
    }

    ~GlobalEagerInstantiationScope() {
      if (!Enabled) return;

      // Restore the set of pending vtables.
      assert(S.VTableUses.empty() &&
             "VTableUses should be empty before it is discarded.");
      S.VTableUses.swap(SavedVTableUses);

      // Restore the set of pending implicit instantiations.
      if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) {
        assert(S.PendingInstantiations.empty() &&
               "PendingInstantiations should be empty before it is discarded.");
        S.PendingInstantiations.swap(SavedPendingInstantiations);
      } else {
        // Template instantiations in the PCH may be delayed until the TU.
        S.PendingInstantiations.swap(SavedPendingInstantiations);
        S.PendingInstantiations.insert(S.PendingInstantiations.end(),
                                       SavedPendingInstantiations.begin(),
                                       SavedPendingInstantiations.end());
      }
    }

  private:
    Sema &S;
    SmallVector<VTableUse, 16> SavedVTableUses;
    std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
    bool Enabled;
  };

  /// The queue of implicit template instantiations that are required
  /// and must be performed within the current local scope.
  ///
  /// This queue is only used for member functions of local classes in
  /// templates, which must be instantiated in the same scope as their
  /// enclosing function, so that they can reference function-local
  /// types, static variables, enumerators, etc.
  std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

  /// RAII scope that saves aside the queue of pending local implicit
  /// instantiations; perform() runs the local instantiations queued within
  /// the scope, and the destructor restores the outer queue.
  class LocalEagerInstantiationScope {
  public:
    LocalEagerInstantiationScope(Sema &S) : S(S) {
      SavedPendingLocalImplicitInstantiations.swap(
          S.PendingLocalImplicitInstantiations);
    }

    void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

    ~LocalEagerInstantiationScope() {
      assert(S.PendingLocalImplicitInstantiations.empty() &&
             "there shouldn't be any pending local implicit instantiations");
      SavedPendingLocalImplicitInstantiations.swap(
          S.PendingLocalImplicitInstantiations);
    }

  private:
    Sema &S;
    std::deque<PendingImplicitInstantiation>
        SavedPendingLocalImplicitInstantiations;
  };

  /// A helper class for building up ExtParameterInfos.
  class ExtParameterInfoBuilder {
    SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
    bool HasInteresting = false;

  public:
    /// Set the ExtParameterInfo for the parameter at the given index.
    /// Indices must be set in increasing order.
    void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
      assert(Infos.size() <= index);
      Infos.resize(index);
      Infos.push_back(info);

      if (!HasInteresting)
        HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
    }

    /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
    /// ExtParameterInfo array we've built up.
    const FunctionProtoType::ExtParameterInfo *
    getPointerOrNull(unsigned numParams) {
      if (!HasInteresting) return nullptr;
      Infos.resize(numParams);
      return Infos.data();
    }
  };

  void PerformPendingInstantiations(bool LocalOnly = false);

  TypeSourceInfo *SubstType(TypeSourceInfo *T,
                            const MultiLevelTemplateArgumentList &TemplateArgs,
                            SourceLocation Loc, DeclarationName Entity,
                            bool AllowDeducedTST = false);

  QualType SubstType(QualType T,
                     const MultiLevelTemplateArgumentList &TemplateArgs,
                     SourceLocation Loc, DeclarationName Entity);

  TypeSourceInfo *SubstType(TypeLoc TL,
                            const MultiLevelTemplateArgumentList &TemplateArgs,
                            SourceLocation Loc, DeclarationName Entity);

  TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
                            const MultiLevelTemplateArgumentList &TemplateArgs,
                            SourceLocation Loc,
                            DeclarationName Entity,
                            CXXRecordDecl *ThisContext,
                            Qualifiers ThisTypeQuals);
  void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
                          const MultiLevelTemplateArgumentList &Args);
  bool SubstExceptionSpec(SourceLocation Loc,
                          FunctionProtoType::ExceptionSpecInfo &ESI,
                          SmallVectorImpl<QualType> &ExceptionStorage,
                          const MultiLevelTemplateArgumentList &Args);
  ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
                            const MultiLevelTemplateArgumentList &TemplateArgs,
                            int indexAdjustment,
                            Optional<unsigned> NumExpansions,
                            bool ExpectParameterPack);
  bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
                      const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
                      const MultiLevelTemplateArgumentList &TemplateArgs,
                      SmallVectorImpl<QualType> &ParamTypes,
                      SmallVectorImpl<ParmVarDecl *> *OutParams,
                      ExtParameterInfoBuilder &ParamInfos);
  ExprResult SubstExpr(Expr *E,
                       const MultiLevelTemplateArgumentList &TemplateArgs);

  /// Substitute the given template arguments into a list of
  /// expressions, expanding pack expansions if required.
  ///
  /// \param Exprs The list of expressions to substitute into.
  ///
  /// \param IsCall Whether this is some form of call, in which case
  /// default arguments will be dropped.
  ///
  /// \param TemplateArgs The set of template arguments to substitute.
  ///
  /// \param Outputs Will receive all of the substituted arguments.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
                  const MultiLevelTemplateArgumentList &TemplateArgs,
                  SmallVectorImpl<Expr *> &Outputs);

  StmtResult SubstStmt(Stmt *S,
                       const MultiLevelTemplateArgumentList &TemplateArgs);

  TemplateParameterList *
  SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
                      const MultiLevelTemplateArgumentList &TemplateArgs);

  bool
  SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
                         const MultiLevelTemplateArgumentList &TemplateArgs,
                         TemplateArgumentListInfo &Outputs);

  Decl *SubstDecl(Decl *D, DeclContext *Owner,
                  const MultiLevelTemplateArgumentList &TemplateArgs);

  /// Substitute the name and return type of a defaulted 'operator<=>' to form
  /// an implicit 'operator=='.
  FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
                                           FunctionDecl *Spaceship);

  ExprResult SubstInitializer(Expr *E,
                       const MultiLevelTemplateArgumentList &TemplateArgs,
                       bool CXXDirectInit);

  bool
  SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
                      CXXRecordDecl *Pattern,
                      const MultiLevelTemplateArgumentList &TemplateArgs);

  bool
  InstantiateClass(SourceLocation PointOfInstantiation,
                   CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
                   const MultiLevelTemplateArgumentList &TemplateArgs,
                   TemplateSpecializationKind TSK,
                   bool Complain = true);

  bool InstantiateEnum(SourceLocation PointOfInstantiation,
                       EnumDecl *Instantiation, EnumDecl *Pattern,
                       const MultiLevelTemplateArgumentList &TemplateArgs,
                       TemplateSpecializationKind TSK);

  bool InstantiateInClassInitializer(
      SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
      FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);

  /// An attribute from a pattern that must be instantiated after the
  /// enclosing declaration has been created, together with the scope and
  /// declaration it applies to.
  struct LateInstantiatedAttribute {
    const Attr *TmplAttr;
    LocalInstantiationScope *Scope;
    Decl *NewDecl;

    LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
                              Decl *D)
      : TmplAttr(A), Scope(S), NewDecl(D) { }
  };
  typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;

  void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
                        const Decl *Pattern, Decl *Inst,
                        LateInstantiatedAttrVec *LateAttrs = nullptr,
                        LocalInstantiationScope *OuterMostScope = nullptr);

  void
  InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
                          const Decl *Pattern, Decl *Inst,
                          LateInstantiatedAttrVec *LateAttrs = nullptr,
                          LocalInstantiationScope *OuterMostScope = nullptr);

  void InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor);

  bool usesPartialOrExplicitSpecialization(
      SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);

  bool
  InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
                           ClassTemplateSpecializationDecl *ClassTemplateSpec,
                           TemplateSpecializationKind TSK,
                           bool Complain = true);

  void InstantiateClassMembers(SourceLocation PointOfInstantiation,
                               CXXRecordDecl *Instantiation,
                        const MultiLevelTemplateArgumentList &TemplateArgs,
                               TemplateSpecializationKind TSK);

  void InstantiateClassTemplateSpecializationMembers(
                                          SourceLocation PointOfInstantiation,
                           ClassTemplateSpecializationDecl *ClassTemplateSpec,
                                                TemplateSpecializationKind TSK);

  NestedNameSpecifierLoc
  SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
                           const MultiLevelTemplateArgumentList &TemplateArgs);

  DeclarationNameInfo
  SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
                           const MultiLevelTemplateArgumentList &TemplateArgs);
  TemplateName
  SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
                    SourceLocation Loc,
                    const MultiLevelTemplateArgumentList &TemplateArgs);

  bool SubstTypeConstraint(TemplateTypeParmDecl *Inst,
                           const TypeConstraint *TC,
                           const MultiLevelTemplateArgumentList &TemplateArgs);

  bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD,
                                  ParmVarDecl *Param);
  void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
                                FunctionDecl *Function);

  bool CheckInstantiatedFunctionTemplateConstraints(
      SourceLocation PointOfInstantiation, FunctionDecl *Decl,
      ArrayRef<TemplateArgument> TemplateArgs,
      ConstraintSatisfaction &Satisfaction);
  FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
                                               const TemplateArgumentList *Args,
                                               SourceLocation Loc);
  void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
                                     FunctionDecl *Function,
                                     bool Recursive = false,
                                     bool DefinitionRequired = false,
                                     bool AtEndOfTU = false);
  VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
      VarTemplateDecl *VarTemplate, VarDecl *FromVar,
      const TemplateArgumentList &TemplateArgList,
      const TemplateArgumentListInfo &TemplateArgsInfo,
      SmallVectorImpl<TemplateArgument> &Converted,
      SourceLocation PointOfInstantiation,
      LateInstantiatedAttrVec *LateAttrs = nullptr,
      LocalInstantiationScope *StartingScope = nullptr);
  VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
      VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
      const MultiLevelTemplateArgumentList &TemplateArgs);
  void
  BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
                             const MultiLevelTemplateArgumentList &TemplateArgs,
                             LateInstantiatedAttrVec *LateAttrs,
                             DeclContext *Owner,
                             LocalInstantiationScope *StartingScope,
                             bool InstantiatingVarTemplate = false,
                             VarTemplateSpecializationDecl *PrevVTSD = nullptr);
  void InstantiateVariableInitializer(
      VarDecl *Var, VarDecl *OldVar,
      const MultiLevelTemplateArgumentList &TemplateArgs);
  void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
                                     VarDecl *Var, bool Recursive = false,
                                     bool DefinitionRequired = false,
                                     bool AtEndOfTU = false);

  void InstantiateMemInitializers(CXXConstructorDecl *New,
                                  const CXXConstructorDecl *Tmpl,
                            const MultiLevelTemplateArgumentList &TemplateArgs);

  NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          bool FindingInstantiatedContext = false);
  DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
                          const MultiLevelTemplateArgumentList &TemplateArgs);

  // Objective-C declarations.
enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const 
*ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. 
void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build a an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type. QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. 
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). 
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); void deduceOpenCLAddressSpace(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. 
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); 
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; /// Check whether the declared result type of the given Objective-C /// method declaration is compatible with the method's class. 
ResultTypeCompatibilityKind checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method, const ObjCInterfaceDecl *CurrentClass); void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method, ObjCMethodDecl *overridden); void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaAlignPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaAlignPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). 
void ActOnPragmaMSPointersToMembers(
    LangOptions::PragmaMSPointersToMembersKind Kind,
    SourceLocation PragmaLoc);

/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
                           SourceLocation PragmaLoc, MSVtorDispMode Value);

/// Kinds of MS-style segment pragma handled by ActOnPragmaMSSeg /
/// UnifySection below.
enum PragmaSectionKind {
  PSK_DataSeg,
  PSK_BSSSeg,
  PSK_ConstSeg,
  PSK_CodeSeg,
};

// Overload taking the declaration being placed in the named section.
bool UnifySection(StringRef SectionName, int SectionFlags,
                  NamedDecl *TheDecl);
// Overload taking only the pragma's location — presumably used when the
// section is introduced by the pragma itself rather than a declaration;
// confirm against the implementation.
bool UnifySection(StringRef SectionName, int SectionFlags,
                  SourceLocation PragmaSectionLocation);

/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
                      PragmaMsStackAction Action,
                      llvm::StringRef StackSlotLabel,
                      StringLiteral *SegmentName, llvm::StringRef PragmaName);

/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags,
                          StringLiteral *SegmentName);

/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
                          StringLiteral *SegmentName);

/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);

/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
                               StringRef Value);

/// Are precise floating point semantics currently enabled?
/// True only when every value-changing fast-math feature in the current
/// FP options (reassociation, no-signed-zeros, reciprocal, approximate
/// functions) is disabled.
bool isPreciseFPEnabled() {
  return !CurFPFeatures.getAllowFPReassociate() &&
         !CurFPFeatures.getNoSignedZero() &&
         !CurFPFeatures.getAllowReciprocal() &&
         !CurFPFeatures.getAllowApproxFunc();
}

/// Called on a well formed floating-point eval-method pragma.
void ActOnPragmaFPEvalMethod(SourceLocation Loc,
                             LangOptions::FPEvalMethodKind Value);

/// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control
void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
                             PragmaFloatControlKind Value);

/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, const IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, const WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC); /// Called on well formed /// \#pragma clang fp reassociate void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled); /// ActOnPragmaFenvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled); /// Called on well formed '\#pragma clang fp' that has option 'exceptions'. void ActOnPragmaFPExceptions(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// Called to set constant rounding mode for floating point operations. void setRoundingMode(SourceLocation Loc, llvm::RoundingMode); /// Called to set exception behavior for floating point operations. 
void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// a the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. 
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, bool IsPackExpansion); void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, Expr *OE); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ParamExpr); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E); /// AddAnnotationAttr - Adds an annotation Annot with Args arguments to D. void AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Annot, MutableArrayRef<Expr *> Args); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. 
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI, Expr *MaxThreads, Expr *MinBlocks); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name, bool InInstantiation = false); void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI, ParameterABI ABI); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); /// addAMDGPUWavePersEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration. void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); /// Lookup 
'coroutine_traits' in std namespace and std::experimental /// namespace. The namespace found is recorded in Namespace. ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc, NamespaceDecl *&Namespace); /// Check that the expression co_await promise.final_suspend() shall not be /// potentially-throwing. bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; struct DeclareTargetContextInfo { struct MapInfo { OMPDeclareTargetDeclAttr::MapTypeTy MT; SourceLocation Loc; }; /// Explicitly listed variables and functions in a 'to' or 'link' clause. llvm::DenseMap<NamedDecl *, MapInfo> ExplicitlyMapped; /// The 'device_type' as parsed from the clause. OMPDeclareTargetDeclAttr::DevTypeTy DT = OMPDeclareTargetDeclAttr::DT_Any; /// The directive kind, `begin declare target` or `declare target`. OpenMPDirectiveKind Kind; /// The directive with indirect clause. Optional<Expr *> Indirect; /// The directive location. SourceLocation Loc; DeclareTargetContextInfo(OpenMPDirectiveKind Kind, SourceLocation Loc) : Kind(Kind), Loc(Loc) {} }; /// Number of nested '#pragma omp declare target' directives. SmallVector<DeclareTargetContextInfo, 4> DeclareTargetNesting; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true, bool SuppressExprDiags = false); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Returns the number of scopes associated with the construct on the given /// OpenMP level. 
int getNumberOfConstructScopes(unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Analyzes and checks a loop nest for use by a loop transformation. /// /// \param Kind The loop transformation directive kind. /// \param NumLoops How many nested loops the directive is expecting. /// \param AStmt Associated statement of the transformation directive. /// \param LoopHelpers [out] The loop analysis result. /// \param Body [out] The body code nested in \p NumLoops loop. /// \param OriginalInits [out] Collection of statements and declarations that /// must have been executed/declared before entering the /// loop. /// /// \return Whether there was any error. bool checkTransformableLoopNest( OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops, SmallVectorImpl<OMPLoopBasedDirective::HelperExprs> &LoopHelpers, Stmt *&Body, SmallVectorImpl<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>> &OriginalInits); /// Helper to keep information about the current `omp begin/end declare /// variant` nesting. struct OMPDeclareVariantScope { /// The associated OpenMP context selector. OMPTraitInfo *TI; /// The associated OpenMP context selector mangling. std::string NameSuffix; OMPDeclareVariantScope(OMPTraitInfo &TI); }; /// Return the OMPTraitInfo for the surrounding scope, if any. OMPTraitInfo *getOMPTraitInfoForSurroundingScope() { return OMPDeclareVariantScopes.empty() ? nullptr : OMPDeclareVariantScopes.back().TI; } /// The current `omp begin/end declare variant` scopes. SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes; /// The current `omp begin/end assumes` scopes. SmallVector<AssumptionAttr *, 4> OMPAssumeScoped; /// All `omp assumes` we encountered so far. 
SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal; public: /// The declarator \p D defines a function in the scope \p S which is nested /// in an `omp begin/end declare variant` scope. In this method we create a /// declaration for \p D and rename \p D according to the OpenMP context /// selector of the surrounding scope. Return all base functions in \p Bases. void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope( Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, SmallVectorImpl<FunctionDecl *> &Bases); /// Register \p D as specialization of all base functions in \p Bases in the /// current `omp begin/end declare variant` scope. void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope( Decl *D, SmallVectorImpl<FunctionDecl *> &Bases); /// Act on \p D, a function definition inside of an `omp [begin/end] assumes`. void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D); /// Can we exit an OpenMP declare variant scope at the moment. bool isInOpenMPDeclareVariantScope() const { return !OMPDeclareVariantScopes.empty(); } /// Given the potential call expression \p Call, determine if there is a /// specialization via the OpenMP declare variant mechanism available. If /// there is, return the specialized call expression, otherwise return the /// original \p Call. ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig); /// Handle a `omp begin declare variant`. void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI); /// Handle a `omp end declare variant`. void ActOnOpenMPEndDeclareVariant(); /// Checks if the variant/multiversion functions are compatible. 
bool areMultiversionVariantFunctionsCompatible( const FunctionDecl *OldFD, const FunctionDecl *NewFD, const PartialDiagnostic &NoProtoDiagID, const PartialDiagnosticAt &NoteCausedDiagIDAt, const PartialDiagnosticAt &NoSupportDiagIDAt, const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported, bool ConstexprSupported, bool CLinkageMayDiffer); /// Function tries to capture lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a VD should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. /// \param OpenMPCaptureLevel Capture level within an OpenMP construct. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, unsigned OpenMPCaptureLevel) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// If the current region is a range loop-based region, mark the start of the /// loop construct. void startOpenMPCXXRangeFor(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level, unsigned CapLevel) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. 
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; /// Check if the specified global variable must be captured by outer capture /// regions. /// \param Level Relative level of nested OpenMP construct for that /// the check is performed. bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); /// Called on well-formed '\#pragma omp metadirective' after parsing /// of the associated statement. StmtResult ActOnOpenMPMetaDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. 
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp [begin] assume[s]'. void ActOnOpenMPAssumesDirective(SourceLocation Loc, OpenMPDirectiveKind DKind, ArrayRef<std::string> Assumptions, bool SkippedClauses); /// Check if there is an active global `omp begin assumes` directive. bool isInOpenMPAssumeScope() const { return !OMPAssumeScoped.empty(); } /// Check if there is an active global `omp assumes` directive. bool hasGlobalOpenMPAssumes() const { return !OMPAssumeGlobal.empty(); } /// Called on well-formed '#pragma omp end assumes'. void ActOnOpenMPEndAssumesDirective(); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on Requires directive OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. 
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const; const ValueDecl *getOpenMPDeclareMapperVarName() const; /// Called on the start of target region i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI); /// Called at the end of target region i.e. '#pragma omp end declare target'. 
const DeclareTargetContextInfo ActOnOpenMPEndDeclareTargetDirective(); /// Called once a target context is completed, that can be when a /// '#pragma omp end declare target' was encountered or when a /// '#pragma omp declare target' without declaration-definition-seq was /// encountered. void ActOnFinishedOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI); /// Searches for the provided declaration name for OpenMP declare target /// directive. NamedDecl *lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT, DeclareTargetContextInfo &DTCI); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Finishes analysis of the deferred functions calls that may be declared as /// host/nohost during device/host compilation. void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller, const FunctionDecl *Callee, SourceLocation Loc); /// Return true if currently in OpenMP task with untied clause context. bool isInOpenMPTaskUntiedContext() const; /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return !DeclareTargetNesting.empty(); } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// Called for syntactical loops (ForStmt or CXXForRangeStmt) associated to /// an OpenMP loop directive. 
StmtResult ActOnOpenMPCanonicalLoop(Stmt *AStmt);

/// Process a canonical OpenMP loop nest that can either be a canonical
/// literal loop (ForStmt or CXXForRangeStmt), or the generated loop of an
/// OpenMP loop transformation construct.
StmtResult ActOnOpenMPLoopnest(Stmt *AStmt);

/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);

/// Called after parsing of any OpenMP executable directive and its
/// associated statement/clauses; builds the directive's AST representation.
/// Acts as the common entry point that the directive-specific
/// ActOnOpenMP*Directive handlers below are dispatched from.
///
/// \param Kind Kind of the OpenMP directive being built.
/// \param DirName Directive name (used by name-carrying directives such as
/// 'critical'; empty otherwise).
/// \param CancelRegion Region kind for cancellation-related directives.
/// \param Clauses List of clauses of the directive.
/// \param AStmt Associated statement (may be null for stand-alone
/// directives).
StmtResult ActOnOpenMPExecutableDirective(
    OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
    OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
using VarsWithInheritedDSAType =
    llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                         SourceLocation StartLoc, SourceLocation EndLoc,
                         VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '#pragma omp tile' after parsing of its clauses and
/// the associated statement.
StmtResult ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
                                    Stmt *AStmt, SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed '#pragma omp unroll' after parsing of its clauses
/// and the associated statement.
StmtResult ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
                                      Stmt *AStmt, SourceLocation StartLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp depobj'. StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp scan'. 
StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp teams loop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsGenericLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams loop' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetTeamsGenericLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel loop' after parsing of the /// associated statement. StmtResult ActOnOpenMPParallelGenericLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel loop' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetParallelGenericLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. 
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop simd' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. 
StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. 
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp interop'.
StmtResult ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses,
                                       SourceLocation StartLoc,
                                       SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp dispatch' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp masked' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMaskedDirective(ArrayRef<OMPClause *> Clauses,
                                      Stmt *AStmt, SourceLocation StartLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp loop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPGenericLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type, bool IsDeclareSimd = false); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); /// Checks '\#pragma omp declare variant' variant function and original /// functions after parsing of the associated method/function. /// \param DG Function declaration to which declare variant directive is /// applied to. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \param TI The trait info object representing the match clause. /// \param NumAppendArgs The number of omp_interop_t arguments to account for /// in checking. /// \returns None, if the function/variant function are not compatible with /// the pragma, pair of original function/variant ref expression otherwise. Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef, OMPTraitInfo &TI, unsigned NumAppendArgs, SourceRange SR); /// Called on well-formed '\#pragma omp declare variant' after parsing of /// the associated method/function. 
/// \param FD Function declaration to which declare variant directive is /// applied to. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \param TI The context traits associated with the function variant. /// \param AdjustArgsNothing The list of 'nothing' arguments. /// \param AdjustArgsNeedDevicePtr The list of 'need_device_ptr' arguments. /// \param AppendArgs The list of 'append_args' arguments. /// \param AdjustArgsLoc The Location of an 'adjust_args' clause. /// \param AppendArgsLoc The Location of an 'append_args' clause. /// \param SR The SourceRange of the 'declare variant' directive. void ActOnOpenMPDeclareVariantDirective( FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI, ArrayRef<Expr *> AdjustArgsNothing, ArrayRef<Expr *> AdjustArgsNeedDevicePtr, ArrayRef<OMPDeclareVariantAttr::InteropType> AppendArgs, SourceLocation AdjustArgsLoc, SourceLocation AppendArgsLoc, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'align' clause. 
OMPClause *ActOnOpenMPAlignClause(Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-form 'sizes' clause. OMPClause *ActOnOpenMPSizesClause(ArrayRef<Expr *> SizeExprs, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-form 'full' clauses. OMPClause *ActOnOpenMPFullClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-form 'partial' clauses. OMPClause *ActOnOpenMPPartialClause(Expr *FactorExpr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'detach' clause. 
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);

/// Generic handler for "simple" OpenMP clauses that take a single
/// enumerator/keyword argument; dispatches on \p Kind to the
/// clause-specific ActOnOpenMP*Clause handler.
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument,
                                   SourceLocation ArgumentLoc,
                                   SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);

/// Called on well-formed 'when' clause.
OMPClause *ActOnOpenMPWhenClause(OMPTraitInfo &TI, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
                                    SourceLocation KindLoc,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
                                     SourceLocation KindLoc,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
                                  SourceLocation KindLoc,
                                  SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
                                   SourceLocation KindLoc,
                                   SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);

/// Generic handler for OpenMP clauses that carry a single expression plus
/// additional enumerator arguments (e.g. 'schedule'); dispatches on
/// \p Kind to the clause-specific ActOnOpenMP*Clause handler.
OMPClause *ActOnOpenMPSingleExprWithArgClause(
    OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
    OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
    OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
    SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);

/// Generic handler for OpenMP clauses that take no arguments; dispatches
/// on \p Kind to the clause-specific ActOnOpenMP*Clause handler below.
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
                             SourceLocation EndLoc);

/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
                                  SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'compare' clause.
OMPClause *ActOnOpenMPCompareClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);

/// Called on well-formed 'init' clause.
OMPClause *ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs,
                                 bool IsTarget, bool IsTargetSync,
                                 SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation VarLoc,
                                 SourceLocation EndLoc);

/// Called on well-formed 'use' clause.
OMPClause *ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc,
                                SourceLocation LParenLoc,
                                SourceLocation VarLoc, SourceLocation EndLoc);

/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation VarLoc,
                                    SourceLocation EndLoc);

/// Called on well-formed 'novariants' clause.
OMPClause *ActOnOpenMPNovariantsClause(Expr *Condition,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
/// Called on well-formed 'nocontext' clause.
OMPClause *ActOnOpenMPNocontextClause(Expr *Condition,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed 'filter' clause.
OMPClause *ActOnOpenMPFilterClause(Expr *ThreadID, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
                                                SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
                                              SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
    OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    SourceLocation EndLoc);

/// Generic handler for OpenMP clauses that take a list of variables;
/// dispatches on \p Kind to the clause-specific ActOnOpenMP*Clause
/// handler. The extra parameters carry the clause-kind-dependent pieces
/// (mapper/reduction identifiers, map/motion modifiers, etc.).
OMPClause *ActOnOpenMPVarListClause(
    OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
    const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
    CXXScopeSpec &ReductionOrMapperIdScopeSpec,
    DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
    ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
    ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
    SourceLocation ExtraModifierLoc,
    ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
    ArrayRef<SourceLocation> MotionModifiersLoc);

/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed 'exclusive' clause.
OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
                          SourceLocation StartLoc, SourceLocation ColonLoc,
                          SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause( ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. 
OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depobj' pseudo clause. OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. 
OMPClause *ActOnOpenMPMapClause( ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, bool NoDiagnose = false, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. 
OMPClause * ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'use_device_addr' clause. OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'nontemporal' clause. OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Data for list of allocators. struct UsesAllocatorsData { /// Allocator. Expr *Allocator = nullptr; /// Allocator traits. Expr *AllocatorTraits = nullptr; /// Locations of '(' and ')' symbols. SourceLocation LParenLoc, RParenLoc; }; /// Called on well-formed 'uses_allocators' clause. OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<UsesAllocatorsData> Data); /// Called on well-formed 'affinity' clause. OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, Expr *Modifier, ArrayRef<Expr *> Locators); /// Called on a well-formed 'bind' clause. OMPClause *ActOnOpenMPBindClause(OpenMPBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. 
CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_PRValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. 
This function is a no-op if the operand has a function type // or an array type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check whether the given statement can have musttail applied to it, /// issuing a diagnostic and returning false if not. In the success case, /// the statement is rewritten to remove implicit nodes from the return /// value. bool checkAndRewriteMustTailAttr(Stmt *St, const Attr &MTA); private: /// Check whether the given statement can have musttail applied to it, /// issuing a diagnostic and returning false if not. bool checkMustTailAttr(const Stmt *St, const Attr &MTA); public: /// Check to see if a given expression could have '.c_str()' called on it. 
bool hasCStrMethod(const Expr *E);

/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                            const FunctionProtoType *Proto, unsigned FirstParam,
                            ArrayRef<Expr *> Args,
                            SmallVectorImpl<Expr *> &AllArgs,
                            VariadicCallType CallType = VariadicDoesNotApply,
                            bool AllowExplicit = false,
                            bool IsListInitialization = false);

// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                            FunctionDecl *FDecl);

/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
  /// An arithmetic operation.
  ACK_Arithmetic,
  /// A bitwise operation.
  ACK_BitwiseOp,
  /// A comparison.
  ACK_Comparison,
  /// A conditional (?:) operator.
  ACK_Conditional,
  /// A compound assignment expression.
  ACK_CompAssign,
};

// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                    SourceLocation Loc, ArithConvKind ACK);

/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
  /// Compatible - the types are compatible according to the standard.
  Compatible,

  /// PointerToInt - The assignment converts a pointer to an int, which we
  /// accept as an extension.
PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointers types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatibleFunctionPointer - The assignment is between two function /// pointers types that are not compatible, but we accept them as an /// extension. IncompatibleFunctionPointer, /// IncompatiblePointerSign - The assignment is between two pointers types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. /// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. 
IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointers types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. 
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. 
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_PRValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion, bool AllowBoolOperation, bool ReportInvalid); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); // type checking for sizeless vector binary operators. QualType CheckSizelessVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, ArithConvKind OperationKind); /// Type checking for matrix binary operators. QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); bool isValidSveBitcast(QualType srcType, QualType destType); bool areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy); bool areVectorTypesSameSize(QualType srcType, QualType destType); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. 
Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; // Fake up a scoped enumeration that still contextually converts to bool. struct ReferenceConversionsScope { /// The conversions that would be performed on an lvalue of type T2 when /// binding a reference of type T1 to it, as determined when evaluating /// whether T1 is reference-compatible with T2. enum ReferenceConversions { Qualification = 0x1, NestedQualification = 0x2, Function = 0x4, DerivedToBase = 0x8, ObjC = 0x10, ObjCLifetime = 0x20, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime) }; }; using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, ReferenceConversions *Conv = nullptr); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckMatrixCast - Check type constraints for matrix casts. // We allow casting between matrixes of the same dimensions i.e. when they // have the same number of rows and column. Returns true if the cast is // invalid. bool CheckMatrixCast(SourceRange R, QualType DestTy, QualType SrcTy, CastKind &Kind); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. 
// returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. 
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. 
void EmitRelatedResultTypeNoteForReturn(QualType destType);

/// The outcome of analyzing a condition expression (of an 'if', 'while',
/// 'for', 'do', 'switch', or 'if constexpr'): the condition variable (if
/// any), the converted condition expression, and — for constexpr
/// conditions — its known constant value.
class ConditionResult {
  // Condition variable declared in the condition, if any (C++ 'if (T x = e)').
  Decl *ConditionVar;
  // The (converted) condition expression itself.
  FullExprArg Condition;
  // True if the condition was ill-formed.
  bool Invalid;
  // Whether KnownValue below is meaningful (constexpr, non-dependent).
  bool HasKnownValue;
  // Constant truth value of the condition, valid only if HasKnownValue.
  bool KnownValue;

  friend class Sema;

  ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
                  bool IsConstexpr)
      : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
        HasKnownValue(IsConstexpr && Condition.get() &&
                      !Condition.get()->isValueDependent()),
        KnownValue(HasKnownValue &&
                   !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
  explicit ConditionResult(bool Invalid)
      : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
        HasKnownValue(false), KnownValue(false) {}

public:
  ConditionResult() : ConditionResult(false) {}

  bool isInvalid() const { return Invalid; }
  std::pair<VarDecl *, Expr *> get() const {
    return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
                          Condition.get());
  }
  // Returns the compile-time truth value of the condition, or None when it
  // is not a known constant.
  llvm::Optional<bool> getKnownValue() const {
    if (!HasKnownValue)
      return None;
    return KnownValue;
  }
};
static ConditionResult ConditionError() { return ConditionResult(true); }

enum class ConditionKind {
  Boolean,     ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
  ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
  Switch       ///< An integral condition for a 'switch' statement.
};

// The type a condition of the given kind is converted toward: integers for
// 'switch', bool for everything else.
QualType PreferredConditionType(ConditionKind K) const {
  return K == ConditionKind::Switch ? Context.IntTy : Context.BoolTy;
}

ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr,
                               ConditionKind CK, bool MissingOK = false);

ConditionResult ActOnConditionVariable(Decl *ConditionVar,
                                       SourceLocation StmtLoc,
                                       ConditionKind CK);

DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);

ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                  SourceLocation StmtLoc, ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);

/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return the converted condition expression, or ExprError() if there
/// were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
                                 bool IsConstexpr = false);

/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);

/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);

/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);

/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);

/// CheckCXXBooleanCondition - Perform the C++ conversion of the condition
/// to bool; yields ExprError() if the conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);

/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
                                        unsigned NewWidth, bool NewSign,
                                        SourceLocation Loc, unsigned DiagID);

/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);

/// Abstract base class used for diagnosing integer constant
/// expression violations.
/// Customization hooks for the diagnostics emitted when an expression turns
/// out not to be an integer constant expression (ICE); callers subclass this
/// and override the diagnose* hooks.
class VerifyICEDiagnoser {
public:
  // When true, suppress the diagnostics (the caller only wants the result).
  bool Suppress;

  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

  /// Diagnose an expression whose type is not an ICE type.
  virtual SemaDiagnosticBuilder diagnoseNotICEType(Sema &S,
                                                   SourceLocation Loc,
                                                   QualType T);
  /// Diagnose an expression that is not an integer constant expression.
  virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S,
                                               SourceLocation Loc) = 0;
  /// Diagnose a non-ICE that could nevertheless be folded to a constant.
  virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc);
  virtual ~VerifyICEDiagnoser() {}
};

/// Whether a non-ICE expression that folds to a constant is acceptable.
enum AllowFoldKind {
  NoFold,
  AllowFold,
};

/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Yields an invalid result on
/// failure. Can optionally return the value of the expression via \p Result.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           VerifyICEDiagnoser &Diagnoser,
                                           AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           unsigned DiagID,
                                           AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E,
                                           llvm::APSInt *Result = nullptr,
                                           AllowFoldKind CanFold = NoFold);
// Convenience overload that discards the evaluated value.
ExprResult VerifyIntegerConstantExpression(Expr *E,
                                           AllowFoldKind CanFold = NoFold) {
  return VerifyIntegerConstantExpression(E, nullptr, CanFold);
}

/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Yields an invalid result on failure.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                          QualType FieldTy, bool IsMsStruct, Expr *BitWidth,
                          bool *ZeroWidth = nullptr);

private:
// Count of the pragmas forcing __host__ __device__ currently in effect
// (see Push/PopForceCUDAHostDevice below).
unsigned ForceCUDAHostDeviceDepth = 0;

public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();

/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__.
/// Returns false if the count is 0 before decrementing, so you can emit an
/// error.
bool PopForceCUDAHostDevice();

/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
               std::vector<PartialDiagnosticAt>>
    DeviceDeferredDiags;

/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
  CanonicalDeclPtr<FunctionDecl> FD;
  SourceLocation Loc;
};

/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;

/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
               /* Caller = */ FunctionDeclAndLoc>
    DeviceKnownEmittedFns;

/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics
///   unless \p EmitOnBothSides is true.
/// - If CurContext is a __device__ or __global__ function, emits the
///   diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
///   the device, creates a diagnostic which is emitted if and when we realize
///   that the function will be codegen'ed.
///
/// Example usage:
///
///  // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. SemaDiagnosticBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately. /// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the host, emits the diagnostics immediately. /// - If CurContext is a non-host function, just ignore it. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPHostode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. 
SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD); SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD = nullptr); SemaDiagnosticBuilder targetDiag(SourceLocation Loc, const PartialDiagnostic &PD, FunctionDecl *FD = nullptr) { return targetDiag(Loc, PD.getDiagID(), FD) << PD; } /// Check if the type is allowed to be used for the current target. void checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D = nullptr); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); enum CUDAVariableTarget { CVT_Device, /// Emitted on device side with a shadow variable on host side CVT_Host, /// Emitted on host side only CVT_Both, /// Emitted on both sides with different addresses CVT_Unified, /// Emitted as a unified address, e.g. managed variables }; /// Determines whether the given variable is emitted on host or device side. CUDAVariableTarget IdentifyCUDATarget(const VarDecl *D); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D); // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. 
CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); /// May add implicit CUDAConstantAttr attribute to VD, depending on VD /// and current compilation settings. void MaybeAddCUDAConstantAttr(VarDecl *VD); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. 
/// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas by default is host device function unless it has explicit /// host or device attribute. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given a implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. 
// // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. 
PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); enum class AttributeCompletion { Attribute, Scope, None, }; void CodeCompleteAttribute( AttributeCommonInfo::Syntax Syntax, AttributeCompletion Completion = AttributeCompletion::Attribute, const IdentifierInfo *Scope = 
nullptr); /// Determines the preferred type of the current function argument, by /// examining the signatures of all possible overloads. /// Returns null if unknown or ambiguous, or if code completion is off. /// /// If the code completion point has been reached, also reports the function /// signatures that were considered. /// /// FIXME: rename to GuessCallArgumentType to reduce confusion. QualType ProduceCallSignatureHelp(Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc, bool Braced); QualType ProduceCtorInitMemberSignatureHelp( Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc, bool Braced); QualType ProduceTemplateArgumentSignatureHelp( TemplateTy, ArrayRef<ParsedTemplateArgument>, SourceLocation LAngleLoc); void CodeCompleteInitializer(Scope *S, Decl *D); /// Trigger code completion for a record of \p BaseType. \p InitExprs are /// expressions in the initializer list seen so far and \p D is the current /// Designation being parsed. 
void CodeCompleteDesignator(const QualType BaseType, llvm::ArrayRef<Expr *> InitExprs, const Designation &D); void CodeCompleteAfterIf(Scope *S, bool IsBracedThen); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, bool IsUsingDeclaration, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteAfterFunctionEquals(Declarator &D); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void 
CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void 
CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl, StringRef ParamName, QualType ArgTy, QualType ParamTy); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr 
*CoprocArg, bool WantCDE); bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum); bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinComplex(CallExpr *TheCall); bool SemaBuiltinVSX(CallExpr *TheCall); bool 
SemaBuiltinOSLogFormat(CallExpr *TheCall); bool SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinArithmeticFence(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum); bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID, const char *TypeDesc); bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc); bool SemaBuiltinElementwiseMath(CallExpr *TheCall); bool PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall); bool 
PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall); // Matrix builtin handling. ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, ExprResult CallResult); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckFreeArguments(const CallExpr *E); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS, BinaryOperatorKind Opcode); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void 
CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(const Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void CheckTCBEnforcement(const SourceLocation CallExprLoc, const NamedDecl *Callee); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. 
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Peform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Nullable_result = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; bool isCFError(RecordDecl *D); /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. 
/// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// Determine the number of levels of enclosing template parameters. This is /// only usable while parsing. Note that this does not include dependent /// contexts in which no template parameters have yet been declared, such as /// in a terse function template or generic lambda before the first 'auto' is /// encountered. unsigned getTemplateDepth(Scope *S) const; /// To be used for checking whether the arguments being passed to /// function exceeds the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. 
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions; private: int ParsingClassDepth = 0; class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. 
void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the sef of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurLexicalContext is a kernel function or it is known that the /// function will be emitted for the device, emits the diagnostics /// immediately. /// - If CurLexicalContext is a function and we are compiling /// for the device, but we don't know that this function will be codegen'ed /// for devive yet, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// Diagnose __float128 type usage only from SYCL device code if the current /// target doesn't support it /// if (!S.Context.getTargetInfo().hasFloat128Type() && /// S.getLangOpts().SYCLIsDevice) /// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128"; SemaDiagnosticBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Check whether we're allowed to call Callee from the current context. 
  ///
  /// - If the call is never allowed in a semantically-correct program
  ///   emits an error and returns false.
  ///
  /// - If the call is allowed in semantically-correct programs, but only if
  ///   it's never codegen'ed, creates a deferred diagnostic to be emitted if
  ///   and when the caller is codegen'ed, and returns true.
  ///
  /// - Otherwise, returns true without emitting any diagnostics.
  ///
  /// Adds Callee to DeviceCallGraph if we don't know if its caller will be
  /// codegen'ed yet.
  bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);

  /// Recursively check the type rooted at \p DeclToCheck for constructs that
  /// are not allowed in SYCL device code; \p Visited guards against cycles.
  /// (NOTE(review): exact diagnostic behavior is defined out-of-line —
  /// confirm in the implementation.)
  void deepTypeCheckForSYCLDevice(SourceLocation UsedAt,
                                  llvm::DenseSet<QualType> Visited,
                                  ValueDecl *DeclToCheck);
};

/// RAII object that enters a new expression evaluation context.
///
/// Pushes an ExpressionEvaluationContext on construction (unless suppressed
/// via ShouldEnter) and pops it again on destruction, keeping push/pop
/// strictly balanced even on early returns.
class EnterExpressionEvaluationContext {
  Sema &Actions;
  // Tracks whether a context was actually pushed, so the destructor only
  // pops what the constructor pushed.
  bool Entered = true;

public:
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other,
      bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              ExprContext);
  }

  /// Variant that re-uses the lambda context declaration of the enclosing
  /// evaluation context (tag-dispatched via ReuseLambdaContextDecl_t).
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Sema::ReuseLambdaContextDecl_t,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(
        NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
  }

  enum InitListTag { InitList };
  /// Variant used for braced-init-lists: only enters a context when needed
  /// (see comment below), so Entered starts out false.
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
    if (ShouldEnter && Actions.isUnevaluatedContext() &&
        Actions.getLangOpts().CPlusPlus11) {
      Actions.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::UnevaluatedList);
      Entered = true;
    }
  }

  ~EnterExpressionEvaluationContext() {
    if (Entered)
      Actions.PopExpressionEvaluationContext();
  }
};

DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
                         sema::TemplateDeductionInfo &Info);

/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  CachedTokens Toks;
  /// The template function declaration to be late parsed.
  Decl *D;
};

// Out-of-line specialization: #pragma pack/align stack manipulation needs
// AlignPackInfo-specific handling.
template <>
void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation,
                                                 PragmaMsStackAction Action,
                                                 llvm::StringRef StackSlotLabel,
                                                 AlignPackInfo Value);

} // end namespace clang

namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
  using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
  using FDBaseInfo =
      DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;

  static FunctionDeclAndLoc getEmptyKey() {
    return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
  }

  static FunctionDeclAndLoc getTombstoneKey() {
    return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
  }

  static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
    return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
                        FDL.Loc.getHashValue());
  }

  static bool isEqual(const FunctionDeclAndLoc &LHS,
                      const FunctionDeclAndLoc &RHS) {
    return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
  }
};
} // namespace llvm

#endif
imageprojection_ops.h
#include <omp.h>

#ifdef _MSC_VER
#define IPOPS_INLINE __inline
#include <float.h>
#define IPOPS_ISINF(x) (!_finite(x))
#define IPOPS_ISNAN(x) (_isnan(x))
// MSVC STILL doesn't support C11
// (so on MSVC the "atomic" pixel type is a plain uint32_t; the atomic
// operations below are only compiled on the C11 branch)
typedef uint32_t atomicpixel_t;
#else
//#include <pthread.h>
#include <fenv.h>
#include <stdatomic.h>
#define IPOPS_INLINE inline
#define IPOPS_ISINF isinf
#define IPOPS_ISNAN isnan
typedef _Atomic uint32_t atomicpixel_t; /* underlying data is actually a
					   float, but we use this type because
					   it permits us to use atomic
					   operations. YOU WILL GET BAD DATA
					   IF YOU ACCESS THIS DIRECTLY!!! */
#endif

/* Reinterpret an atomicpixel_t buffer, in place, as plain float32_t.
   Call this only after all concurrent writers are done; afterwards the
   buffer may be read through the returned float pointer.
   (float32_t is presumably a project typedef for a 4-byte float —
   the asserts below enforce the size assumption.) */
static IPOPS_INLINE float32_t *atomicbuf_sanitize(atomicpixel_t *buf,size_t n)
{
  /* sanitize atomic data in buf so it is OK to read it as floats */
  int64_t cnt;

  /* union of the two pointer types: lets us copy element-by-element
     without a direct aliasing cast */
  union cvt {
    float32_t *fptr;
    atomicpixel_t *iptr;
  } cvtptr;

  assert(sizeof(atomicpixel_t)==4);
  assert(sizeof(float32_t)==4);
  assert(sizeof(uint32_t)==4);

  cvtptr.iptr = buf;

#pragma omp parallel default(shared) private(cnt) //,n)
#pragma omp for
  for (cnt=0;cnt < n;cnt++) {
    /* memcpy in and out of a union so the conversion is well-defined
       even under strict aliasing */
    union {
      char cbuf[4];
      float32_t fbuf;
      uint32_t ibuf;
    } cvtbuf;
    memcpy(&cvtbuf.ibuf,&cvtptr.iptr[cnt],4);
    memcpy(&cvtptr.fptr[cnt],&cvtbuf.fbuf,4);
  }
  return cvtptr.fptr;
}

/* Inverse of atomicbuf_sanitize: reinterpret a float32_t buffer, in place,
   as atomicpixel_t so it may subsequently be used with the atomic
   accessors below. */
static IPOPS_INLINE atomicpixel_t *atomicbuf_prepare(float32_t *buf,size_t n)
{
  /* prepare float data in buf so it is OK to read it as atomics */
  int64_t cnt;

  assert(sizeof(atomicpixel_t)==4);
  assert(sizeof(float32_t)==4);
  assert(sizeof(uint32_t)==4);

  union cvt {
    float32_t *fptr;
    atomicpixel_t *iptr;
  } cvtptr;

  cvtptr.fptr = buf;

#pragma omp parallel default(shared) private(cnt)//,n)
#pragma omp for
  for (cnt=0;cnt < n;cnt++) {
    union {
      char cbuf[4];
      float32_t fbuf;
      atomicpixel_t ibuf;
    } cvtbuf;
    memcpy(&cvtbuf.fbuf,&cvtptr.fptr[cnt],4);
    memcpy(&cvtptr.iptr[cnt],&cvtbuf.ibuf,4);
  }
  return cvtptr.iptr;
}

/* Atomically load one pixel (acquire ordering) and return it as a float. */
static IPOPS_INLINE float32_t atomicpixel_load(volatile atomicpixel_t *var)
{
  // Use of a union like this is legal in C11, even under the strictest
  // aliasing rules
  union {
    uint32_t intval;
    float32_t floatval;
  } pixval;
  pixval.intval=atomic_load_explicit(var,memory_order_acquire);//,memory_order_consume);
  return pixval.floatval;
}

/* Non-atomic load: only safe when no other thread is writing the buffer. */
static IPOPS_INLINE float32_t atomicpixel_nonatomicload(atomicpixel_t *var)
{
  // Use of a union like this is legal in C11, even under the strictest
  // aliasing rules
  union {
    uint32_t intval;
    float32_t floatval;
  } pixval;
  pixval.intval=*var;
  return pixval.floatval;
}

/* Atomically store one float pixel (release ordering). */
static IPOPS_INLINE void atomicpixel_store(volatile atomicpixel_t *var,float32_t val)
{
  // Use of a union like this is legal in C11, even under the strictest
  // aliasing rules
  union {
    uint32_t intval;
    float32_t floatval;
  } pixval;
  pixval.floatval=val;
  atomic_store_explicit(var,pixval.intval,memory_order_release);
}

/* Non-atomic store: only safe when no other thread touches the buffer. */
static IPOPS_INLINE void atomicpixel_nonatomicstore(atomicpixel_t *var,float32_t val)
{
  // Use of a union like this is legal in C11, even under the strictest
  // aliasing rules
  union {
    uint32_t intval;
    float32_t floatval;
  } pixval;
  pixval.floatval=val;
  *var=pixval.intval;
}

//pthread_mutex_t accumulatemutex=PTHREAD_MUTEX_INITIALIZER;

/* Atomically add toadd to the float stored in *var, via a classic
   load + compare-exchange retry loop. On CAS failure oldvalue.intval is
   refreshed with the current contents, so the addition is recomputed
   against the latest value each iteration. */
static IPOPS_INLINE void atomicpixel_accumulate(volatile atomicpixel_t *var,float toadd)
{
  // Use of a union like this is legal in C11, even under the strictest
  // aliasing rules
  union {
    uint32_t intval;
    float32_t floatval;
    char workbuf[4];
  } oldvalue,newvalue; // ,workvalue;

  // pthread_mutex_lock(&accumulatemutex);

  //oldvalue.floatval=atomicpixel_load(var);
  oldvalue.intval=atomic_load_explicit(var,memory_order_acquire);//,memory_order_consume);
  do {
    //memcpy(workvalue.workbuf,&oldvalue.intval,4);
    newvalue.floatval=oldvalue.floatval+toadd;
    //workvalue.floatval+=toadd;
    //memcpy(&newvalue.intval,&workvalue.workbuf,4);
  } while (!atomic_compare_exchange_strong_explicit(var,&oldvalue.intval,newvalue.intval,memory_order_seq_cst,memory_order_acquire)); //,memory_order_consume));
  // pthread_mutex_unlock(&accumulatemutex);
}

/* Fill an atomicpixel_t buffer with 0.0f (non-atomic stores: callers must
   ensure no concurrent access during zeroing). */
static IPOPS_INLINE void atomicbuf_zero(atomicpixel_t *buf,size_t n)
{
  /* sanitize atomic data in buf so it is OK to
read it as floats */
  int64_t cnt;

  assert(sizeof(atomicpixel_t)==4);
  assert(sizeof(float32_t)==4);

#pragma omp parallel default(shared) private(cnt) //,n)
#pragma omp for
  for (cnt=0;cnt < n;cnt++) {
    atomicpixel_nonatomicstore(&buf[cnt],0.0);
  }
}

/* Divide each element of tonormalize by the corresponding element of
   normbuf, in place. Uses non-atomic accessors: callers must ensure no
   concurrent writers. NOTE(review): a zero normalization produces
   inf/NaN here — presumably filtered downstream by the validity
   threshold logic; confirm with callers. */
static IPOPS_INLINE void normalize_atomic_buffer(atomicpixel_t *tonormalize,atomicpixel_t *normbuf,size_t n)
{
  /* Divide each element in tonormalize by the corresponding element in normbuf */
  int64_t cnt;

  assert(sizeof(atomicpixel_t)==4);
  assert(sizeof(float32_t)==4);

#pragma omp parallel default(shared) private(cnt)//,n)
#pragma omp for
  for (cnt=0;cnt < n;cnt++) {
    float32_t value,normalization;
    value=atomicpixel_nonatomicload(&tonormalize[cnt]);
    normalization=atomicpixel_nonatomicload(&normbuf[cnt]);
    value/=normalization;
    atomicpixel_nonatomicstore(&tonormalize[cnt],value);
  }
}

/* Zero every element of buf whose corresponding thresholdtestbuf element
   is below threshold (non-atomic accessors; no concurrent access). */
static IPOPS_INLINE void atomicbuffer_dropthreshold(atomicpixel_t *buf,atomicpixel_t *thresholdtestbuf,float32_t threshold,size_t n)
{
  /* Set each element in buf corresponding to an element of thresholdtestbuf
     that is less than threshold to 0 */
  int64_t cnt;

  assert(sizeof(atomicpixel_t)==4);
  assert(sizeof(float32_t)==4);

#pragma omp parallel default(shared) private(cnt)//,n)
#pragma omp for
  for (cnt=0;cnt < n;cnt++) {
    float32_t thresholdtest;
    thresholdtest=atomicpixel_nonatomicload(&thresholdtestbuf[cnt]);
    if (thresholdtest < threshold) {
      //bufval=atomicpixel_nonatomicload(&buf[cnt]);
      atomicpixel_nonatomicstore(&buf[cnt],0.0);
    }
  }
}

// C++: This should be implemented as an abstract interface
// that can either use a meshed representation (like this one)
// or a NURBS representation, depending on the surface object.
// ... Might also call OpenCL to do the heavy lifting

//#include <math.h>

/* Right now, OpenCV is used solely for matrix inversion...
*/
//#include <opencv2/core/version.hpp>
//#if (CV_MAJOR_VERSION >= 3)
//#include "opencv2/core/fast_math.hpp" // Temporary workaround for https://github.com/opencv/opencv/issues/6585
//#endif
//#include <opencv/cv.h>

/* splat characteristics for projecttoimgbuf */
#define MIN_RADIUS_UV_PIXELS 1.5
#define MIN_RADIUS_SRC_PIXELS 1.5
//#define BANDWIDTH_FRACTION 0.7
//#define BANDWIDTH_FRACTION 0.25
#define BANDWIDTH_FRACTION 0.4

/* One projection surface: image/weighting/validity accumulation buffers in
   UV space, frame transforms, an octree of bounding boxes over the polygons,
   and the per-polygon mesh geometry used for ray intersection. */
struct projectionsurface {
  atomicpixel_t *imgbuf;         // nx*ny accumulated pixel values (UV space)
  atomicpixel_t *weightingbuf;   // nx*ny accumulated projection weights
  atomicpixel_t *validitybuf;    // nx*ny validity accumulation
  atomicpixel_t *angle_of_incidence_factor_buf_uv;
  size_t nx;
  size_t ny;
  float64_t u_meaningfulunits_per_texcoord;
  float64_t v_meaningfulunits_per_texcoord;
  float64_t *tocameraframe; // 4x4
  float64_t *tosurfaceframe; // 4x4
  float64_t *vecfromsurfaceframe; // 3x3
  float64_t *boxcoords; // nboxes x 6 (xmin,ymin,zmin,xmax,ymax,zmax per box)
  size_t nboxes;
  int32_t *boxes; // nboxes x 9: entries 0..7 are child-box indices (-1 if
                  // absent); entry 8 indexes into boxpolys (-1 if this box
                  // holds no polygons directly)
  int32_t *boxpolys; // -1-terminated polygon index lists, addressed via boxes[...*9+8]
  uint32_t *vertexidx_indices; // npolys
  int32_t *vertexidx; // indexed by vertexidx_indices, -1 terminator after each polygon
  uint32_t *numvertices; // npolys
  size_t max_vertices;
  //size_t nvertices;
  size_t npolys;
  float64_t *vertices; // nvertices x 3
  float64_t *facetnormals; // npolys x 3 unit normals — TODO confirm layout
  float64_t *refpoints;    // npolys x 3 per-polygon reference points
  float64_t *inplanemats;  // npolys x (2x3) in-plane orthonormal bases
  //float64_t *outofplanevec;
  float64_t *inplane2texcoords; // 2x3 projective matrix
  float64_t *maxradius; // n_polys
};

/* Relax `value` toward `upper_bound` by explicit finite-difference
   diffusion: ntbar time steps of step delta_tbar on an ny x nx grid with
   squared grid spacings dybarsq/dxbarsq. Before each step, value is clamped
   to upper_bound. extra_buf is scratch of the same size; ping-pong buffering
   guarantees the final result lands back in `value` (ntbar is rounded up
   to even for exactly that reason). Boundary rows/columns are never
   updated by the diffusion step. */
static IPOPS_INLINE void iterate_diffuse_mismatch(float32_t *upper_bound, float32_t *value, float32_t *extra_buf, double delta_tbar, uint64_t ny, uint64_t nx, double dybarsq, double dxbarsq, uint64_t ntbar)
{
  uint64_t tcnt;
  int64_t ycnt; // must be signed for msvc compatibility
  float32_t *ping_buf;
  float32_t *pong_buf;
  float32_t *temp_ptr;

  if (ntbar % 2) ntbar++; // ntbar must be even so our result ends up
  // in value, not in extra_buf

  // interchange ping_buf and pong_buf every iteration
  ping_buf = value;
  pong_buf = extra_buf;

  for (tcnt=0;tcnt < ntbar;tcnt++,temp_ptr=ping_buf,ping_buf=pong_buf,pong_buf=temp_ptr) {
    // exceedance_zone = value > 
upper_bound // value[ exceedance_zone ]=upper_bound[ exceedance_zone ] # Assign specified value condition where the value exceeds the upper bound #pragma omp parallel default(shared) private(ycnt)//,ny) { #pragma omp for for (ycnt=0;ycnt < ny;ycnt++) { uint64_t xcnt; for(xcnt=0;xcnt < nx;xcnt++) { if (ping_buf[nx*ycnt + xcnt] > upper_bound[nx*ycnt + xcnt]) { ping_buf[nx*ycnt + xcnt] = upper_bound[nx*ycnt + xcnt]; } } } } #pragma omp parallel default(shared) private(ycnt)//,ny) { // Iterate time // T_t+ = T_t- + dt*( (T_x+ -2T + T_x-)/dx^2 + (T_y+ -2T + T_y-)/dy^2 ) #pragma omp for for (ycnt=1;ycnt < ny-1;ycnt++) { uint64_t xcnt; for(xcnt=1;xcnt < nx-1;xcnt++) { // value[1:-1,1:-1]=value[1:-1,1:-1] + delta_tbar * ( ((value[1:-1,2:] - 2*value[1:-1,1:-1] + value[1:-1,:-2])/dxbar**2) + ((value[2:,1:-1] - 2*value[1:-1,1:-1] + value[:-2,1:-1])/dybar**2) ) pong_buf[nx*ycnt + xcnt] = ping_buf[nx*ycnt + xcnt] + delta_tbar * ( ((ping_buf[nx*ycnt + xcnt+1] - 2*ping_buf[nx*ycnt + xcnt] + ping_buf[nx*ycnt + xcnt-1])/dxbarsq) + ((ping_buf[nx*(ycnt+1) + xcnt] - 2*ping_buf[nx*ycnt + xcnt] + ping_buf[nx*(ycnt-1) + xcnt])/dybarsq)); } } } } } static IPOPS_INLINE int ray_box_intersection(float64_t *boxcoords, float64_t *starting_point, float64_t *ray_direc) { /* Slab method: Look at distance t along the ray where we first intersect each slab. That's tnear. Look at the distance t along the ray where we last intersect each slab. That's tfar. Look at the largest tnear value. If that's greater than the smallest tfar vluae, the ray misses the box. 
Also special cases if the ray is parallel to an axis */ /* See http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm */ //double tnear=infnan(-ERANGE); /* -infinity */ //double tfar=infnan(ERANGE); /* infinity */ /*** NEED MSVC FIX ***/ #ifdef _MSC_VER uint64_t infinity_val=0x000000007FF00000l; uint64_t neginfinity_val=0x00000000FFF00000l; double tnear,tfar; tnear = *(float64_t *)&neginfinity_val; /* -infinity */ tfar = *(float64_t *)&infinity_val; /* infinity */ #else double tnear=-1.0/0.0; /* -infinity */ double tfar=1.0/0.0; /* infinity */ #endif { double curnear,curfar,temp; int index; int max_index; for (index=0;index < 3;index++) { /* index indexes starting_point and ray_direc, and finds the min value from box_coords */ max_index=index+3; /* finds the max value from box_coords */ /* Start with X=xmin and X=xmax planes */ if (ray_direc[index] == 0.0) { /* Ray is normal to given axis, parallel to these planes */ /* if origin not between planes, does not intersect */ if (starting_point[index] < boxcoords[index] || starting_point[index] > boxcoords[max_index]) { return FALSE; } } else { curnear=(boxcoords[index]-starting_point[index])/ray_direc[index]; /* distance to reach x value of boxcoords[0] */ curfar=(boxcoords[max_index]-starting_point[index])/ray_direc[index]; /* did we get curnear and curfar in the correct order? 
	   */
	if (curfar < curnear) {
	  /* swap */
	  temp=curfar;
	  curfar=curnear;
	  curnear=temp;
	}

	if (curnear > tnear) {
	  tnear=curnear; /* largest tnear value */
	}
	if (curfar < tfar) {
	  tfar=curfar; /* smallest tfar value */
	}

	if (tnear > tfar) {
	  /* missed box */
	  return FALSE;
	}

	if (tfar < 0.0) {
	  /* box is behind */
	  return FALSE;
	}
      }
    }
    return TRUE;
  }
}

/* Distance t along the ray starting_point + t*ray_direc at which it crosses
   the plane through planepoint with normal planenormal. Returns -1.0 when
   the ray is parallel to the plane. (t may be negative if the plane lies
   behind the starting point.) */
static IPOPS_INLINE float64_t ray_to_plane_distance(float64_t *starting_point,float64_t *ray_direc, float64_t *planepoint, float64_t *planenormal)
{
  // starting point and planepoint should be 3-vectors (or normalized 4-vectors)
  // (planepoint - raypoint) dot planenormal / (raydirec dot planenormal)
  // returns -1.0 if ray is parallel to plane
  float64_t pointdiff[3];
  float64_t denominator;

  subvecvec3(planepoint,starting_point,pointdiff);
  denominator=dotvecvec3(ray_direc,planenormal);

  if (denominator==0.0) return -1.0;
  return dotvecvec3(pointdiff,planenormal)/denominator;
}

/* Derivative of the ray/plane intersection point with respect to a change
   ray_direc_deriv in the (normalized) ray direction; result goes in deriv. */
static IPOPS_INLINE void ray_to_plane_raydirec_shift(float64_t *starting_point,float64_t *ray_direc, float64_t *planepoint, float64_t *planenormal,float64_t *ray_direc_deriv,float64_t *deriv)
/* Calculate change in ray_to_plane intersection point with respect to
   ray_direc (change in normalized vector) */
{
  // starting point and planepoint should be 3-vectors (or normalized 4-vectors)
  // (planepoint - raypoint) dot planenormal / (raydirec dot planenormal)
  // returns -1.0 if ray is parallel to plane
  float64_t pointdiff[3],firstterm[3],secterm[3],diff[3];
  float64_t ray_direc_dot_planenormal;

  // Regular evaluation of intersection point
  subvecvec3(planepoint,starting_point,pointdiff);
  ray_direc_dot_planenormal=dotvecvec3(ray_direc,planenormal);
  //intersectpoint = startingpoint + dotvecvec3(pointdiff,planenormal)/ray_direc_dot_planenormal * ray_direc;

  // We need to calculate dintersectpoint/dray_direc:
  // pointdiff is constant, planenormal is constant
  //intersectpoint = startingpoint + dotvecvec3(pointdiff,planenormal)* (ray_direc/(ray_direc dot planenormal)
  // deriv intersectpoint = dotvecvec3(pointdiff,planenormal)* ( deriv ray_direc * (ray_direc dot planenormal) - ((deriv ray_direc) dot planenormal) * ray_direc)/(ray_direc dot planenormal)^2
  // deriv intersectpoint = (dotvecvec3(pointdiff,planenormal)/(ray_direc_dot_planenormal)^2) * ( deriv ray_direc * (ray_direc dot planenormal) - ((deriv ray_direc) dot planenormal) * ray_direc)

  // quotient rule, term by term:
  scalevec3(ray_direc_dot_planenormal,ray_direc_deriv,firstterm);
  scalevec3(dotvecvec3(ray_direc_deriv,planenormal),ray_direc,secterm);
  subvecvec3(firstterm,secterm,diff);
  scalevec3(dotvecvec3(pointdiff,planenormal)/(ray_direc_dot_planenormal*ray_direc_dot_planenormal),diff,deriv); // store result in deriv

  // you should multiply return value by discrete shift (factor for ray_direc_deriv)
  // (but this is 1.0 for a one pixel shift if factors are correct on the way in)
}

/* Derivative of the ray/plane intersection point with respect to a shift
   starting_point_direc_deriv of the ray origin; result goes in deriv. */
static IPOPS_INLINE void ray_to_plane_raypos_shift(float64_t *starting_point,float64_t *ray_direc, float64_t *planepoint, float64_t *planenormal,float64_t *starting_point_direc_deriv,float64_t *deriv)
/* Calculate change in ray_to_plane intersection point with respect to
   ray_direc (change in normalized vector) */
{
  // starting point and planepoint should be 3-vectors (or normalized 4-vectors)
  // (planepoint - raypoint) dot planenormal / (raydirec dot planenormal)
  // returns -1.0 if ray is parallel to plane
  float64_t pointdiff[3];
  float64_t ray_direc_dot_planenormal;

  // Regular evaluation of intersection point
  subvecvec3(planepoint,starting_point,pointdiff);
  ray_direc_dot_planenormal=dotvecvec3(ray_direc,planenormal);
  //intersectpoint = startingpoint + dotvecvec3(planepoint-starting_point,planenormal)/ray_direc_dot_planenormal * ray_direc;

  // We need to calculate dintersectpoint/dstartingpoint:
  // ray_direc is constant, planenormal is constant
  //intersectpoint = startingpoint + dotvecvec3(planepoint-starting_point,planenormal)*(ray_direc/(ray_direc dot planenormal))
  //intersectpoint = startingpoint + 
dotvecvec3(planepoint,planenormal)*(ray_direc/(ray_direc dot planenormal)) - dotvecvec3(starting_point,planenormal)*(ray_direc/(ray_direc dot planenormal)) // deriv intersectpoint = deriv startingpoint - dotvecvec3(deriv starting_point,planenormal)*(ray_direc/(ray_direc dot planenormal)) addvecscaledvec3(starting_point_direc_deriv,-dotvecvec3(starting_point_direc_deriv,planenormal)/ray_direc_dot_planenormal,ray_direc,deriv); // you should multiply return value by discrete shift (factor for ray_direc_deriv) // (but this is 1.0 for a one pixel shift if factors are correct on the way in) } static IPOPS_INLINE int ray_intersects_polygon(float64_t *vertexarray, int32_t *vertexids, uint32_t numvertices, float64_t *normal_vector, // unit vector float64_t *refpoint, float64_t *inplanemat, // 2x3 matrix: two orthogonal unit vectors normal to normal_vector float64_t *starting_point, float64_t *ray_direc, float64_t maxradius, float64_t ray_to_plane_dist, int include_edges,int trace) { float64_t intersectpoint[3]; float64_t u1,v1,u2,v2; float64_t windingnum=0.0; size_t vertnum,nextvertnum; float64_t vec1[3]; float64_t vec2[3]; float64_t intersectdist[3]; float64_t magn1,magn2,det,cosparam; addvecscaledvec3(starting_point,ray_to_plane_dist,ray_direc,intersectpoint); subvecvec3(intersectpoint,refpoint,intersectdist); /* checking whether intersection point within maximum outer radius of polygon */ if (trace) { fprintf(stderr,"intersectpoint=%g,%g,%g; refpoint=%g,%g,%g\n",intersectpoint[0],intersectpoint[1],intersectpoint[2],refpoint[0],refpoint[1],refpoint[2]); fprintf(stderr,"intersectdist=%g; maxradius=%g\n",normvec3(intersectdist),maxradius); { float64_t intersect_outofplane=dotvecvec3(intersectdist,normal_vector); fprintf(stderr,"out of plane component = %g\n",intersect_outofplane); //if (fabs(intersect_outofplane) > (0.3*normvec3(intersectdist))) { // *((char *)0) = 0; // force segfault // } } } if (normvec3(intersectdist) <= maxradius) { if (trace) fprintf(stderr,"within 
outer radius\n"); /* Within outer radius */ // for the ray, determine if intersectpoint is inside the polygon // Apply winding number algorithm. // This algorithm is selected -- in its most simple form -- // because it is so simple and robust in the case of the // intersect point being on or near the edge. It may well // be much slower than optimal. // // Should probably implement a faster algorithm then drop // down to this for the special cases. // See Hormann and Agathos, The point in polygon problem // for arbitrary polygons, Computational Geometry 20(3) 131-144 (2001) // http://dx.doi.org/10.1016/S0925-7721(01)00012-8 // https://pdfs.semanticscholar.org/e90b/d8865ddb7c7af2b159d413115050d8e5d297.pdf // Winding number is sum over segments of // acos((point_to_vertex1 dot point_to_vertex2)/(magn(point_to_vertex1)*magn(point_to_vertex_2))) * sign(det([ point_to_vertex1 point_to_vertex2 ])) // where sign(det) is really: What is the sign of the z // component of (point_to_vertex1 cross point_to_vertex2) // // Special cases: magn(point_to_vertex1)==0 or // magn_point_to_vertex2 -> point is on edge // det([ point_to_vertex1 point_to_vertex2 ]) = 0 -> point may be on edge // for (vertnum=0;vertnum < numvertices && vertexids[vertnum] >= 0;vertnum++) { nextvertnum=vertnum+1; if (vertexids[nextvertnum] < 0 || nextvertnum >= numvertices) nextvertnum=0; // calculate (thisvertex - intersectionpoint) -> vec1 subvecvec3(&vertexarray[vertexids[vertnum]*3],intersectpoint,vec1); // calculate (nextvertex - intersectionpoint) -> vec2 subvecvec3(&vertexarray[vertexids[nextvertnum]*3],intersectpoint,vec2); // Project points into 2-space: u1=dotvecvec3(vec1,inplanemat+0); v1=dotvecvec3(vec1,inplanemat+3); u2=dotvecvec3(vec2,inplanemat+0); v2=dotvecvec3(vec2,inplanemat+3); magn1 = sqrt(u1*u1 + v1*v1); magn2 = sqrt(u2*u2 + v2*v2); if (magn1==0.0 || magn2==0.0) { return include_edges; } /* Normalize vectors */ u1 /= magn1; v1 /= magn1; u2 /= magn2; v2 /= magn2; det=(u1*v2-u2*v1); // 
matrix determinant cosparam=(u1*u2 + v1*v2); // /(magn1*magn2); if (cosparam < -1.0) { cosparam=-1.0; // Shouldn't be possible...just in case of weird roundoff } if (cosparam > 1.0) { cosparam=1.0; // Shouldn't be possible...just in case of weird roundoff } if (det > 0) { windingnum += acos(cosparam); } else if (det < 0) { windingnum -= acos(cosparam); } else { /* det==0 */ /* Vectors parallel or anti-parallel */ if (cosparam > 0.9) { // Vectors parallel. We are OUTSIDE. Do Nothing */ } else if (cosparam < -0.9) { // Vectors anti-parallel. We are ON EDGE */ return include_edges; } else { assert(0); /* Should only be able to get cosparam = +/- 1.0 if det==0.0 */ } } } windingnum=fabs(windingnum)*(1.0/(2.0*M_PI)); // divide out radians to number of winds; don't care about clockwise vs. ccw if (windingnum > 0.999 && windingnum < 1.001) { //fprintf(stderr,"Winding number ~1.0\n"); return TRUE; /* almost exactly one loop */ } if (windingnum < 0.001) { //fprintf(stderr,"Winding number ~0.0\n"); return FALSE; } fprintf(stderr,"imageprojection_ops.h/ray_intersects_polygon Got weird winding number of %e; assuming inaccurate calculation on polygon edge\n",windingnum); // Could also be self intersecting polygon return include_edges; } return FALSE; } static IPOPS_INLINE int box_contains_polygon(struct projectionsurface *surf,size_t boxnum,size_t polynum,int trace) /* used for debugging only */ { int retval=0; size_t cnt,subbox; if (surf->boxes[boxnum*9 + 8]!=-1) { /* Have index into boxpolys array: This box contains polygons */ for (cnt=surf->boxes[boxnum*9 + 8];surf->boxpolys[cnt] >= 0;cnt++) { /* Got a polygon */ if (polynum==surf->boxpolys[cnt]) { if (trace) { fprintf(stderr,"Box %d directly contains polygon %d\n",(int)boxnum,(int)polynum); } return TRUE; } } } /* This box may be sub-divided into 8 */ for (subbox=0;subbox < 8; subbox++) { if (surf->boxes[boxnum*9+subbox] >= 0) { retval = retval || box_contains_polygon(surf,surf->boxes[boxnum*9 + subbox],polynum,FALSE); } } 
  if (retval && trace) {
    fprintf(stderr,"Box %d indirectly contains polygon %d\n",(int)boxnum,(int)polynum);
  } else if (trace) {
    fprintf(stderr,"Box %d does not contain polygon %d\n",(int)boxnum,(int)polynum);
  }
  return retval;
}

/* Recursively walk the box octree from boxnum, intersecting the ray with
   every polygon in boxes the ray passes through. When a polygon is hit at a
   distance closer than *zbufpt, the z-buffer entry and the surface/facet id
   outputs are updated. Returns nonzero if at least one nearer intersection
   was found. */
static IPOPS_INLINE int find_ray_intersections(struct projectionsurface *surf, size_t surfacecnt, size_t boxnum, float64_t *starting_point, float64_t *ray_direc, float32_t *zbufpt, uint32_t *surfaceidpt, uint32_t *facetidpt,int trace)
/* returns nonzero if at least one nearer intersection was found */
{
  size_t cnt,subbox;
  float64_t dist;
  size_t firstidx;
  size_t polynum;
  int retval=FALSE;

  if (trace) fprintf(stderr,"find_ray_intersections(boxnum=%d)\n",(int)boxnum);

  if (!ray_box_intersection(&surf->boxcoords[boxnum*6],starting_point,ray_direc)) {
    /* Ray does not pass through our box. We can not possibly have an
       intersection */
    if (trace) {
      fprintf(stderr,"find_ray_intersections(): Ray does not intersect box %d\n",(int)boxnum);
      //box_contains_polygon(surf,boxnum,17281,TRUE);
    }
    return FALSE;
  }

  //if (trace) {
  //  box_contains_polygon(surf,boxnum,17281,TRUE);
  //}

  if (surf->boxes[boxnum*9 + 8]!=-1) {
    /* Have index into boxpolys array: This box contains polygons */
    for (cnt=surf->boxes[boxnum*9 + 8];surf->boxpolys[cnt] >= 0;cnt++) {
      /* Got a polygon */
      polynum=surf->boxpolys[cnt];
      firstidx=surf->vertexidx_indices[polynum];

      /* distance along the ray to this polygon's plane; note dist is
	 float64 while the z-buffer holds float32 — comparison/store below
	 narrows (presumably acceptable precision; confirm if exact
	 z-ordering matters) */
      dist = ray_to_plane_distance(starting_point,ray_direc, &surf->vertices[3*surf->vertexidx[firstidx + 0]], &surf->facetnormals[polynum*3]);
      if (trace) fprintf(stderr,"polynum=%d; zbufdist=%g\n",(int)polynum,dist);
      if (dist < *zbufpt) {
	if (ray_intersects_polygon(surf->vertices,&surf->vertexidx[firstidx],surf->numvertices[polynum],surf->facetnormals+3*polynum,surf->refpoints+3*polynum,surf->inplanemats+6*polynum,starting_point,ray_direc,surf->maxradius[polynum],dist,TRUE,trace)) {
	  // Will give us a truth value for whether we actually
	  // intersected this polygon.
	  if (trace) fprintf(stderr,"ray_intersects_polygon; zbufdist=%g\n",dist);
	  // If so, record the distance into the z-buffer,
	  // (if we are closest)
	  // the surface count and facet ids into their
	  // proper places,
	  *zbufpt=dist;
	  *surfaceidpt=surfacecnt;
	  *facetidpt=polynum;
	  retval = retval || TRUE;
	} else{
	  if (trace) fprintf(stderr,"ray does not intersect polygon\n");
	}
      }
    }
  }

  /* This box may be sub-divided into 8 */
  for (subbox=0;subbox < 8; subbox++) {
    if (surf->boxes[boxnum*9+subbox] >= 0) {
      retval = find_ray_intersections(surf,surfacecnt,surf->boxes[boxnum*9 + subbox],starting_point,ray_direc,zbufpt,surfaceidpt,facetidpt,trace) || retval; // Note order of OR is important because if it were first, retval could mask find_ray_intersections from execution
    }
  }

  return retval;
}

/* maximum depth of the explicit traversal stack below */
#define FRIN_STACKSIZE 100

/* Iterative (explicit-stack) equivalent of find_ray_intersections, for use
   where recursion depth or call overhead is a concern. */
static IPOPS_INLINE int find_ray_intersections_nonrecursive(struct projectionsurface *surf, size_t surfacecnt, size_t boxnum_first, float64_t *starting_point, float64_t *ray_direc, float32_t *zbufpt, uint32_t *surfaceidpt, uint32_t *facetidpt,int trace)
/* returns nonzero if at least one nearer intersection was found */
{
  size_t cnt;
  size_t boxnum_stack[FRIN_STACKSIZE];
  size_t subbox;
  size_t boxnum;
  size_t stackentries=0;
  float64_t dist;
  size_t firstidx;
  size_t polynum;
  int retval=FALSE;

  /* Push given box onto stack */
  boxnum_stack[stackentries]=boxnum_first;
  stackentries++;

  while (stackentries > 0) {
    boxnum=boxnum_stack[stackentries-1];
    if (trace) fprintf(stderr,"find_ray_intersections_nonrecursive(boxnum=%d)\n",(int)boxnum);

    if (!ray_box_intersection(&surf->boxcoords[boxnum*6],starting_point,ray_direc)) {
      /* Ray does not pass through our box. 
         We can not possibly have an intersection */
      if (trace) {
	fprintf(stderr,"find_ray_intersections(): Ray does not intersect box %d\n",(int)boxnum);
	//box_contains_polygon(surf,boxnum,17281,TRUE);
      }
      // Pop this entry off the stack
      stackentries--;
      // loop back
      continue;
    }
    //if (trace) {
    //  box_contains_polygon(surf,boxnum,17281,TRUE);
    //}

    if (surf->boxes[boxnum*9 + 8]!=-1) {
      /* Have index into boxpolys array: This box contains polygons */
      for (cnt=surf->boxes[boxnum*9 + 8];surf->boxpolys[cnt] >= 0;cnt++) {
	/* Got a polygon */
	polynum=surf->boxpolys[cnt];
	firstidx=surf->vertexidx_indices[polynum];

	dist = ray_to_plane_distance(starting_point,ray_direc, &surf->vertices[3*surf->vertexidx[firstidx + 0]], &surf->facetnormals[polynum*3]);
	if (trace) fprintf(stderr,"polynum=%d; zbufdist=%g\n",(int)polynum,dist);

	if (dist < *zbufpt) {
	  if (ray_intersects_polygon(surf->vertices,&surf->vertexidx[firstidx],surf->numvertices[polynum],surf->facetnormals+3*polynum,surf->refpoints+3*polynum,surf->inplanemats+6*polynum,starting_point,ray_direc,surf->maxradius[polynum],dist,TRUE,trace)) {
	    // Will give us a truth value for whether we actually
	    // intersected this polygon.
	    if (trace) fprintf(stderr,"ray_intersects_polygon; zbufdist=%g\n",dist);
	    // If so, record the distance into the z-buffer,
	    // (if we are closest)
	    // the surface count and facet ids into their
	    // proper places,
	    *zbufpt=dist;
	    *surfaceidpt=surfacecnt;
	    *facetidpt=polynum;
	    retval = retval || TRUE;  // i.e. retval = TRUE
	  } else{
	    if (trace) fprintf(stderr,"ray does not intersect polygon\n");
	  }
	}
      }
    }
    /* Pop this box off of the stack */
    stackentries--;

    /* This box may be sub-divided into 8 */
    /* Push subboxes onto stack */
    for (subbox=0;subbox < 8; subbox++) {
      if (surf->boxes[boxnum*9+subbox] >= 0) {
	assert(stackentries < FRIN_STACKSIZE); /* check for stack overflow */
	boxnum_stack[stackentries]=surf->boxes[boxnum*9+subbox];
	stackentries++;
      }
    }
  }
  return retval;
}

/* Splat one source-image pixel (pixelval, weighted by pixelweighting) into the
   UV-parameterization accumulation buffers around position uvcoords (texture
   coordinates in [0,1]).  The footprint is a raised-cosine kernel whose size
   is derived from the UV derivatives of a one-pixel shift in the source image
   (uvcoords_deriv_horiz / uvcoords_deriv_vert).  Writes are accumulated with
   atomicpixel_accumulate() so concurrent threads may splat into the same
   buffers.  imgbuf may be NULL (validity-only accumulation); weightingbuf may
   be NULL (weighting factor 1.0). */
static IPOPS_INLINE void projecttoimgbuf(float32_t pixelval,float32_t pixelweighting,float64_t *uvcoords,float64_t *uvcoords_deriv_horiz, float64_t *uvcoords_deriv_vert,volatile atomicpixel_t *imgbuf, volatile atomicpixel_t *weightingbuf,volatile atomicpixel_t *validitybuf,size_t framenum,size_t imgbuf_nx,size_t imgbuf_ny,float64_t min_radius_uv_pixels,float64_t min_radius_src_pixels,float64_t bandwidth_fraction)
//size_t max_projection_pixels,uint32 *pixelnumbuf,float64_t *pixelvecbuf,float64_t *pixelmatbuf,
{
  long long arraywidth,arrayheight;
  int arrayx0,arrayy0;
  float64_t projecthalfwidth,projecthalfheight;
  float64_t newwidth,newheight;
  /* pixel-centered UV position: pixel index i covers u in [(i)/nx,(i+1)/nx),
     so u*nx - 0.5 is the position in pixel-center coordinates */
  float64_t uvcoords0_pixels=uvcoords[0]*(imgbuf_nx) - 0.5;
  float64_t uvcoords1_pixels=uvcoords[1]*(imgbuf_ny) - 0.5;
  //CvMat *jacobian,*jacinv;
  float64_t jacobian[4],jacinv[4],detinv;
  float64_t weightingfactor;
  size_t xcnt,ycnt;

  float64_t r2_uv,r2_src,coeff,cosval,cosparam; //sincparam
  float64_t pos[2],pos_frac[2],srcpos[2];
  //float64_t angle_of_incidence_factor;

  if (IPOPS_ISNAN(pixelval)) {
    return; /* never project NaN */
  }

  // Ignore anything at extreme angles of incidence
  //if (angle_of_incidence > 3*M_PI/8) return;

  //jacobian=cvCreateMat(2,2,CV_64F);
  //jacobian->data.db[0]=uvcoords_deriv_horiz[0];
  //jacobian->data.db[1]=uvcoords_deriv_vert[0];
  //jacobian->data.db[2]=uvcoords_deriv_horiz[1];
  //jacobian->data.db[3]=uvcoords_deriv_vert[1];

  // Jacobian of (u,v) with respect to (horiz,vert) one-pixel source shifts,
  // stored row-major as a 2x2 matrix.
  jacobian[0]=uvcoords_deriv_horiz[0];
  jacobian[1]=uvcoords_deriv_vert[0];
  jacobian[2]=uvcoords_deriv_horiz[1];
  jacobian[3]=uvcoords_deriv_vert[1];

  //jacinv=cvCreateMat(2,2,CV_64F);
  //cvInvert(jacobian,jacinv,CV_LU);

  // 2x2 matrix inverse by determinant
  // method:
  // inv([ a b ; c d ]) = (1.0/(ad-bc)) * [d -b ; -c a ]
  // NOTE(review): no guard against a singular (zero-determinant) Jacobian here
  // -- presumably upstream geometry prevents that case; confirm.
  detinv=1.0/(jacobian[0]*jacobian[3]-jacobian[1]*jacobian[2]);
  jacinv[0] = detinv*jacobian[3];
  jacinv[1] = -detinv*jacobian[1];
  jacinv[2] = -detinv*jacobian[2];
  jacinv[3] = detinv*jacobian[0];

  //// Define factor by which we de-emphasize data at larger angles of incidence
  // (moved into evaluate_zbuffer)
  //angle_of_incidence_factor = cos(angle_of_incidence * (M_PI/2)/(3*M_PI/8));

  projecthalfwidth=min_radius_uv_pixels; // texture coordinates are relative to image size, still
  projecthalfheight=min_radius_uv_pixels;

  // Enlarge the splat footprint to cover min_radius_src_pixels source pixels
  // mapped through the Jacobian.
  // NOTE(review): the derivatives can be negative, in which case these ">"
  // comparisons never enlarge the footprint -- confirm whether fabs() was intended.
  newwidth=uvcoords_deriv_horiz[0]*min_radius_src_pixels* imgbuf_nx; // mul by imgbuf_nx to convert from tex coordinate to tex pixels
  if (newwidth > projecthalfwidth) projecthalfwidth=newwidth;
  newheight=uvcoords_deriv_horiz[1]*min_radius_src_pixels*imgbuf_ny;
  if (newheight > projecthalfheight) projecthalfheight=newheight;

  newwidth=uvcoords_deriv_vert[0]*min_radius_src_pixels*imgbuf_nx;
  if (newwidth > projecthalfwidth) projecthalfwidth=newwidth;
  newheight=uvcoords_deriv_vert[1]*min_radius_src_pixels*imgbuf_ny;
  if (newheight > projecthalfheight) projecthalfheight=newheight;

  arraywidth = (size_t) (projecthalfwidth*2+1);
  arrayheight= (size_t) (projecthalfheight*2+1);

  // Clamp the splat rectangle (arrayx0,arrayy0,arraywidth,arrayheight)
  // to the destination image bounds.
  arrayx0 = (int)(uvcoords0_pixels-projecthalfwidth);
  arrayy0 = (int)(uvcoords1_pixels-projecthalfheight);
  if (arrayx0 < 0) arrayx0=0;
  if (arrayy0 < 0) arrayy0=0;
  if (arrayx0 + arraywidth >= imgbuf_nx) arraywidth = imgbuf_nx-arrayx0-1;
  if (arrayy0 + arrayheight >= imgbuf_ny) arrayheight = imgbuf_ny-arrayy0-1;
  if (arraywidth < 0) arraywidth=0;
  if (arrayheight < 0) arrayheight=0;

  //pixelcnt=0;

  for (ycnt=0;ycnt < arrayheight;ycnt++) {
    for (xcnt=0;xcnt < arraywidth;xcnt++) {
      // xcnt+arrayx0, ycnt+arrayy0 are the indices into the pixel image
      // xcnt+arrayx0=0 corresponds to the center of the leftmost pixel
      // The left edge of this pixel should map to u=0.0,u_pixels=0.0
      // this left edge corresponds to xcnt+arrayx0=-0.5
      // That gives pos[0] = 0.0 = -0.5 -uvcoords0_pixels = -0.5 -uvcoords[0]*(imgbuf_nx) + 0.5
      // pos[0] = 0.0 = -0.5 - 0.0 + 0.5
      //
      // xcnt+arrayx0=imgbuf_nx-1 corresponds to the rightmost pixel
      // the right edge of thix pixel should map to u=1.0
      // This right edge corresponds to xcnt+arrayx0 = imgbuf_nx - 0.5
      // that gives pos[0] = imgbuf_nx-0.5 - uvcoords0_pixels
      // pos[0] = imgbuf_nx-0.5 - uvcoords[0]*(imgbuf_nx) + 0.5
      // pos[0] = imgbuf_nx-0.5 - (imgbuf_nx) + 0.5
      // pos[0] = -0.5 + 0.5 = 0 ... so we are right on target
      pos[0]=xcnt+arrayx0 - uvcoords0_pixels;
      pos[1]=ycnt+arrayy0 - uvcoords1_pixels;
      r2_uv = pos[0]*pos[0] + pos[1]*pos[1];
      // pos is in pixels so far, but jacobian/jacimg are in terms
      // of the unity width of the UV parameterization. Scale it down
      pos_frac[0]=pos[0]/imgbuf_nx;
      pos_frac[1]=pos[1]/imgbuf_ny;

      //multmatvec2(jacinv->data.db,pos_frac,srcpos);
      // map the UV offset back into source-pixel offsets via the inverse Jacobian
      multmatvec2(jacinv,pos_frac,srcpos);
      r2_src = srcpos[0]*srcpos[0]+srcpos[1]*srcpos[1];
      //fprintf(stderr,"r_uv=%g; min_radius_uv = %g\n",sqrt(r2_uv),fabs(min_radius_uv_pixels));
      //fprintf(stderr,"r_src=%g; min_radius_src = %g\n",sqrt(r2_src),fabs(min_radius_src_pixels));

      if (r2_uv <= min_radius_uv_pixels*min_radius_uv_pixels || r2_src <= min_radius_src_pixels*min_radius_src_pixels) {
	/* Include this point */
	// Forget complicated bandlimited interpolation
	// Instead project 2D generalized circular sinc function
	// from source into UV space

	//if (pixelcnt >= max_projection_pixels) {
	//  /* too many pixels */
	//  goto fail;
	//}
	weightingfactor=1.0;
	if (weightingbuf) {
	  //weightingfactor=weightingbuf[(arrayy0+ycnt)*imgbuf_nx + (arrayx0+xcnt)];
	  weightingfactor=atomicpixel_load(&weightingbuf[(arrayy0+ycnt)*imgbuf_nx + (arrayx0+xcnt)]);
	}

	// Generalized 2D circular sinc: based on
	// http://www.ebyte.it/library/docs/math07/SincN.html
	// Eq. 18: For 2D, n=2
	// sinc(n,x) = gamma(1+n/2)*J_(n/2)(x)/(x/2)^(n/2)
	// at n=2
	// sinc(2,x) = gamma(2)*J1(x)/(x/2)
	// where gamma(2) is 1.0 and J1 is the Bessel function of the first kind
	// WARNING: This fails at x=0
	//
	//sincparam=sqrt(r2_src)*bandwidth_fraction;
	//if (sincparam < 1e-6) {
	//  // include weighting effects of both the pixel in the camera
	//  // image (pixelweighting) and the u,v space weighting (weightingbuf)
	//  coeff=pixelweighting*weightingfactor; //angle_of_incidence_factor; // effectively 1.0, not including angle of incidence correction
	//} else{
	//  // include weighting effects of both the pixel in the camera
	//  // image (pixelweighting) and the u,v space weighting (weightingbuf)
	//  coeff=pixelweighting*weightingfactor * j1(sincparam)/(sincparam/2.0); //angle_of_incidence_factor * j1(sincparam)/(sincparam/2.0);
	//}

	// Replacement -- Just use raised Cosine
	// 1+cos(sqrt(r2_src)*bandwidth_fraction)
	cosparam=sqrt(r2_src)*bandwidth_fraction*M_PI/M_SQRT2; //
	if (cosparam > M_PI) {
	  cosval=0.0;
	} else {
	  cosval=0.5+0.5*cos(cosparam);
	}
	coeff=pixelweighting*weightingfactor*cosval;

	//fprintf(stderr,"imgbuf[%d]+=%g\n",framenum*imgbuf_ny*imgbuf_nx + (arrayy0+ycnt)*imgbuf_nx + (arrayx0+xcnt),coeff*pixelval);

	// ***!!!!***!!! THIS IS WHERE THE PARALLEL CPU THREAD TRIP OVER EACH OTHER
	// SHOULD WRITE WITH ATOMIC OPERATIONS
	if (imgbuf) {
	  //imgbuf[framenum*imgbuf_ny*imgbuf_nx + (arrayy0+ycnt)*imgbuf_nx + (arrayx0+xcnt)] += coeff * pixelval;
	  atomicpixel_accumulate(&imgbuf[framenum*imgbuf_ny*imgbuf_nx + (arrayy0+ycnt)*imgbuf_nx + (arrayx0+xcnt)], coeff*pixelval);
	}
	//validitybuf[framenum*imgbuf_ny*imgbuf_nx + (arrayy0+ycnt)*imgbuf_nx + (arrayx0+xcnt)] += coeff;
	atomicpixel_accumulate(&validitybuf[framenum*imgbuf_ny*imgbuf_nx + (arrayy0+ycnt)*imgbuf_nx + (arrayx0+xcnt)],coeff);
	//if (angleofincidencebuf) angleofincidencebuf[framenum*imgbuf_ny*imgbuf_nx + (arrayy0+ycnt)*imgbuf_nx + (arrayx0+xcnt)] += coeff * angle_of_incidence;
	//pixelnumbuf[pixelcnt]=ycnt*arraywidth + xcnt;
	//pixelcnt++;
      }
    }
  }
  //numpixels=pixelcnt;
  /*
  // Form inner product matrix
  for (pixelcnt=0;pixelcnt < numpixels;pixelcnt++) {
    xcnt=pixelnumbuf[pixelcnt] % arraywidth;
    ycnt=pixelnumbuf[pixelcnt] / arraywidth;

    pos[0]=xcnt+arrayx0 - uvcoords0_pixels;
    pos[1]=ycnt+arrayy0 - uvcoords1_pixels;
    // pos is in pixels so far, but jacobian/jacimg are in terms
    // of the width of the UV image.
       Scale it down
    pos_frac[0]=pos[0]/imgbuf_nx;
    pos_frac[1]=pos[1]/imgbuf_ny;

    multmatvec2(jacinv->data.dbl,pos_frac,srcpos);
    // srcpos indicates how many pixels this point is off of the
    // source image
    src_r=sqrt(srcpos[0]*srcpos[0]+srcpos[1]*srcpos[1]);

    //// inner product of sinc function centered at pos, parameter
    //// bandwidth_fraction*pixel_pos, with sinc function
    //// centered at (uvcoords0_pixels,uvcoords1_pixels)
    //pixelmatbuf[pixelcnt]=bandwidth_fraction*bandwidth_fraction*(improj_sinc(bandwidth_fraction * pos[0]) + improj_sinc

    // srcpos is now (horizontal, vertical) shift in pixels in source image
  */
  //cvReleaseMat(&jacobian);
  //cvReleaseMat(&jacinv);
}

/* Compute the (unit) ray for source pixel (src_pixelx, src_pixely) through the
   factored camera matrix (Ut/Vtsubinv/s1/s2/V31/V32 from an SVD-style
   decomposition -- see commented 2D-index forms below), transformed into
   object coordinates via surfacearray[surfaceid].tosurfaceframe.
   Outputs: rayvecobj (unit 4-vector, w=0) and *rayvecfactor (the normalization
   factor returned by to_unit_vector4).  If dc1_du0 is non-NULL, also stores the
   derivatives of (c1,c2) with respect to unit horizontal/vertical pixel shifts
   into dc1_du0/dc2_du0/dc1_dv0/dc2_dv0. */
static IPOPS_INLINE void basic_raycalcs(struct projectionsurface *surfacearray, size_t surfaceid, float64_t *cam_mtx_Ut, float64_t *cam_mtx_Vtsubinv, float64_t s1, float64_t s2, float64_t V31, float64_t V32, size_t src_pixelx, size_t src_pixely, /* outputs ... */ double *dc1_du0, /* also a flag for whether we need derivs */ double *dc2_du0, double *dc1_dv0, double *dc2_dv0, double *rayvecobj, /* 4-vector */ double *rayvecfactor)
{
  float64_t q,r,c1,c2;
  double dq_du0,dr_du0,dq_dv0,dr_dv0;
  float64_t rayvec[4];

  //Ut is 2x2
  //q=cam_mtx_Ut[0,0]*xcnt + cam_mtx_Ut[0,1]*ycnt;
  //r=cam_mtx_Ut[1,0]*xcnt + cam_mtx_Ut[1,1]*ycnt;
  q=cam_mtx_Ut[0]*src_pixelx + cam_mtx_Ut[1]*src_pixely;
  r=cam_mtx_Ut[2+0]*src_pixelx + cam_mtx_Ut[2+1]*src_pixely;

  //c1=cam_mtx_Vtsubinv[0,0]*(q/s1-V31) + cam_mtx_Vtsubinv[0,1]*(r/s2-V32);
  //c2=cam_mtx_Vtsubinv[1,0]*(q/s1-V31) + cam_mtx_Vtsubinv[1,1]*(r/s2-V32);
  // Vtsubinv is 2x2
  c1=cam_mtx_Vtsubinv[0]*(q/s1-V31) + cam_mtx_Vtsubinv[1]*(r/s2-V32);
  c2=cam_mtx_Vtsubinv[2+0]*(q/s1-V31) + cam_mtx_Vtsubinv[2+1]*(r/s2-V32);

  if (dc1_du0) {
    // also store dervatives
    //dq_du0=cam_mtx_Ut[0,0];
    //dr_du0=cam_mtx_Ut[1,0];
    //dq_dv0=cam_mtx_Ut[0,1];
    //dr_dv0=cam_mtx_Ut[1,1];
    // Ut is 2x2
    dq_du0=cam_mtx_Ut[0];
    dr_du0=cam_mtx_Ut[2+0];
    dq_dv0=cam_mtx_Ut[1];
    dr_dv0=cam_mtx_Ut[2+1];

    //dc1_du0 = cam_mtx_Vtsubinv[0,0]*(dq_du0/s1) + cam_mtx_Vtsubinv[0,1]*(dr_du0/s2);
    //dc2_du0 = cam_mtx_Vtsubinv[1,0]*(dq_du0/s1) + cam_mtx_Vtsubinv[1,1]*(dr_du0/s2);
    //dc1_dv0 = cam_mtx_Vtsubinv[0,0]*(dq_dv0/s1) + cam_mtx_Vtsubinv[0,1]*(dr_dv0/s2);
    //dc2_dv0 = cam_mtx_Vtsubinv[1,0]*(dq_dv0/s1) + cam_mtx_Vtsubinv[1,1]*(dr_dv0/s2);
    // Vtsubinv is 2x2
    *dc1_du0 = cam_mtx_Vtsubinv[0]*(dq_du0/s1) + cam_mtx_Vtsubinv[1]*(dr_du0/s2);
    *dc2_du0 = cam_mtx_Vtsubinv[2+0]*(dq_du0/s1) + cam_mtx_Vtsubinv[2+1]*(dr_du0/s2);
    *dc1_dv0 = cam_mtx_Vtsubinv[0]*(dq_dv0/s1) + cam_mtx_Vtsubinv[1]*(dr_dv0/s2);
    *dc2_dv0 = cam_mtx_Vtsubinv[2+0]*(dq_dv0/s1) + cam_mtx_Vtsubinv[2+1]*(dr_dv0/s2);
  }

  // OK... We have a ray leaving (0,0,0) in the direction
  // of (unnormalized) (c1,c2,1) in camera coordinates
  rayvec[0]=c1;
  rayvec[1]=c2;
  rayvec[2]=1.0;
  rayvec[3]=0.0; // # "vector" has 0 w-component

  /* apply correction for camera matrix being in OpenCV coords, but our transforms being in OpenGL coords */
  /* We negate rayvec[1] and rayvec[2] rather than messing with tosurfaceframe. */
  rayvec[1]=-rayvec[1];
  rayvec[2]=-rayvec[2];

  multmatvec4(surfacearray[surfaceid].tosurfaceframe,rayvec,rayvecobj);
  *rayvecfactor = to_unit_vector4(rayvecobj);
}

/* Given the outputs of basic_raycalcs() (rayvecobj, rayvecfactor and the
   dc*_d*0 derivatives), compute how the ray/facet intersection point moves in
   object coordinates per unit horizontal and per unit vertical pixel shift of
   the source image.  Results go into horiz_ray_intersect_shift_deriv and
   vert_ray_intersect_shift_deriv (3-vectors). */
static IPOPS_INLINE void deriv_raycalcs(struct projectionsurface *surfacearray, float64_t *focalpointobj, float64_t *rayvecobj, double rayvecfactor, size_t surfaceid, size_t facetid, double dc1_du0, double dc2_du0, double dc1_dv0, double dc2_dv0, /* outputs */ float64_t *horiz_ray_intersect_shift_deriv, float64_t *vert_ray_intersect_shift_deriv)
{
  float64_t drayvec_du0[4];
  float64_t drayvec_du0obj[4];
  float64_t drayvec_dv0[4];
  float64_t drayvec_dv0obj[4];
  size_t firstidx;
  float64_t horiz_ray_direc_shift[3];
  float64_t vert_ray_direc_shift[3];

  /* Note minus signs inserted in elements 1 and 2 to compensate for OpenGL vs.
     OpenCV and needing to negate columns of tosurfaceframe */
  drayvec_du0[0]=dc1_du0*rayvecfactor;
  drayvec_du0[1]=-dc2_du0*rayvecfactor;
  drayvec_du0[2]=0.0; // -1.0*rayvecfactor;
  drayvec_du0[3]=0.0;
  multmatvec4(surfacearray[surfaceid].tosurfaceframe,drayvec_du0,drayvec_du0obj);

  drayvec_dv0[0]=dc1_dv0*rayvecfactor;
  drayvec_dv0[1]=-dc2_dv0*rayvecfactor;
  drayvec_dv0[2]=0.0; // -1.0*rayvecfactor;
  drayvec_dv0[3]=0.0;
  multmatvec4(surfacearray[surfaceid].tosurfaceframe,drayvec_dv0,drayvec_dv0obj);

  firstidx=surfacearray[surfaceid].vertexidx_indices[facetid];

  // Figure out what happens as we shift to adjacent pixel
  // in image being projected.
  // ray_direc + ray_direc_deriv_horiz -> horizshifted_ray_direc
  // Consider derivative of
  // vector normalization: deriv (a/|a|)
  // = (deriv(a)*|a| - (deriv(|a|) a)/|a|^2
  // = deriv(a)/|a| - (a/|a|) deriv(|a|)/|a|
  // since a is already normalized, |a| = 1.0
  // so = deriv(a) - a deriv|a|
  // where |a| = sqrt(x*2 + y*2 + z*2)
  // so deriv |a| = d|a|/dwhatever = 0.5|a|^(-1)*2x dx/dwhatever + 0.5|a|^(-1)*2y dy/dwhatever + 0.5*|a|^(-1)*2z dz/dwhatever
  // deriv |a| = (1/|a|) x dx/dwhatever + y dy/dwhatever + z dz/dwhatever
  // deriv |a| = (1/|a|) (a dot deriv(a))
  // So full derivative = deriv(a) - a ((1/|a|) (a dot deriv(a)))
  // = deriv(a) - a (a dot deriv(a))
  // Therefore,
  // horiz_ray_direc_shift = ray_direc_deriv_horiz - ray_direc * (dot(ray_direc,ray_direc_deriv_horiz)
  addvecscaledvec3(drayvec_du0obj,-dotvecvec3(rayvecobj,drayvec_du0obj),rayvecobj,horiz_ray_direc_shift);
  // horiz_ray_direct_shift is the derivative of the ray unit vector with respect to a horizontal pixel shift, in object coordinates

  // Determine intersect position shift in object coordinates with respect to a unit horizontal pixel shift
  ray_to_plane_raydirec_shift(focalpointobj,rayvecobj,&surfacearray[surfaceid].vertices[3*surfacearray[surfaceid].vertexidx[firstidx + 0]], &surfacearray[surfaceid].facetnormals[facetid*3],horiz_ray_direc_shift,horiz_ray_intersect_shift_deriv);

  // Do same for vert, store vert_ray_intersect_shift_deriv
  addvecscaledvec3(drayvec_dv0obj,-dotvecvec3(rayvecobj,drayvec_dv0obj),rayvecobj,vert_ray_direc_shift);
  // vert_ray_direct_shift is the derivative of the ray unit vector with respect to a vertical pixel shift, in object coordinates

  // Determine intersect position shift in object coordinates with respect to unit a vertical pixel shift
  ray_to_plane_raydirec_shift(focalpointobj,rayvecobj,&surfacearray[surfaceid].vertices[3*surfacearray[surfaceid].vertexidx[firstidx + 0]], &surfacearray[surfaceid].facetnormals[facetid*3],vert_ray_direc_shift,vert_ray_intersect_shift_deriv);
}

/* Orthographic-projection analog of deriv_raycalcs(): the ray direction stays
   constant under a pixel shift and only the ray source location moves (by
   horiz_raysrc_shift / vert_raysrc_shift), so the intersection-point shift
   derivatives come from ray_to_plane_raypos_shift() instead. */
static IPOPS_INLINE void orthographic_deriv_raycalcs(struct projectionsurface *surfacearray, float64_t *raysrclocobj, float64_t *rayvecobj, size_t surfaceid, size_t facetid, float64_t *horiz_raysrc_shift, float64_t *vert_raysrc_shift, /* outputs */ float64_t *horiz_ray_intersect_shift_deriv, float64_t *vert_ray_intersect_shift_deriv)
{
  size_t firstidx;

  firstidx=surfacearray[surfaceid].vertexidx_indices[facetid];

  // Figure out what happens as we shift to adjacent pixel
  // in image being projected.
  // ray_direc remains constant
  // raysrcloc + horiz_raysrc_shift -> horizshifted_raysrc

  // Determine intersect position shift in object coordinates with respect to a unit horizontal pixel shift
  ray_to_plane_raypos_shift(raysrclocobj,rayvecobj,&surfacearray[surfaceid].vertices[3*surfacearray[surfaceid].vertexidx[firstidx + 0]], &surfacearray[surfaceid].facetnormals[facetid*3],horiz_raysrc_shift,horiz_ray_intersect_shift_deriv);

  // Do same for vert, store vert_ray_intersect_shift_deriv
  // Determine intersect position shift in object coordinates with respect to unit a vertical pixel shift
  ray_to_plane_raypos_shift(raysrclocobj,rayvecobj,&surfacearray[surfaceid].vertices[3*surfacearray[surfaceid].vertexidx[firstidx + 0]], &surfacearray[surfaceid].facetnormals[facetid*3],vert_raysrc_shift,vert_ray_intersect_shift_deriv);
}

/* Convert the ray/facet intersection (at parameter dist along rayvecobj from
   focalpointobj) into intrinsic texture (u,v) coordinates of the surface, via
   the facet's in-plane 2D basis (inplanemats) and the in-plane -> texture
   transform (inplane2texcoords).  If intersectpoint2uvcanon_deriv_horiz is
   non-NULL, the supplied intersect-shift derivatives are mapped through the
   same transforms into (u,v)-space derivatives. */
void find_intersect_uv(struct projectionsurface *surfacearray, float64_t *focalpointobj, float64_t *rayvecobj, size_t surfaceid, size_t facetid, float64_t dist, float64_t *horiz_ray_intersect_shift_deriv, // only needed if calculating derivatives
		       float64_t *vert_ray_intersect_shift_deriv, // only needed if calculating derivatives
		       /* outputs */
		       float64_t *intersectpoint2uvcanon, // intersection point in texture coordinates (not scaled UV coordinates
		       float64_t *intersectpoint2uvcanon_deriv_horiz, // also flag for whether to calc derivatives
		       float64_t *intersectpoint2uvcanon_deriv_vert)
{
  float64_t intersectpoint[3];
  float64_t intersectpoint3poly[3];
  float64_t intersectpoint2poly[3];

  // Each polygon has a refpoint (point centroid),
  // A normal, two in-plane basis vectors
  // (inplanevec1,inplanevec2) as rows
  // of a 2x3 matrix inplanemat, calculated from from SVD of
  // (polygon points - polygon centroid)
  // and a 2x3 matrix that gives texture
  // (intrinsic parameterization) coordinates from
  // (inplanevec1, inplanevec2, 1)

  //// Evaluate coordinates relative to centroid
  //numvertices=0
  //for CCnt in range(surfacearray[surfaceid].max_vertices):
  //  thisvertexid=surfacearray[surfaceid].vertexids[surfacearray[surfaceid].max_vertices*facetid+CCnt]
  //  if thisvertexid < 0:
  //    // This polygon has no more vertices
  //    break
  //
  //  subvecvec3(&surfacearray[surfaceid].vertices[thisvertexid*3],&surfacearray[surfaceid].refpt,&relcoords[CCnt*3])
  //
  //  numvertices+=1
  //  pass

  // find intersectpoint in object coordinates
  addvecscaledvec3(focalpointobj,dist,rayvecobj,intersectpoint);

  // Also store the (u,v) coordinates projected so that they can be
  // used to reduce the weightings near edge boundaries.

  // Evaluate 3D intersection point relative to polygon
  subvecvec3(intersectpoint,&surfacearray[surfaceid].refpoints[3*facetid],intersectpoint3poly);

  // Evaluate 2D intersection point relative to polygon
  multmat23vec(&surfacearray[surfaceid].inplanemats[6*facetid],intersectpoint3poly,intersectpoint2poly);

  // be sure to allocate 3 numbers of space
  // for intersectpoint2poly & friends as we will use it
  // in projective form
  intersectpoint2poly[2]=1.0; // A point in projective space

  // Evaluate 2D polygon to (u,v) (transform in-plane 2D coords -> intrinsic texture coords)
  multmat23vec(&surfacearray[surfaceid].inplane2texcoords[6*facetid],intersectpoint2poly,intersectpoint2uvcanon); // use ,intersectpoint2tex) once we have a texcoords->uvcanon mapping

  if (intersectpoint2uvcanon_deriv_horiz) {
    /* if calculate derivatives... */
    float64_t intersectpoint2poly_deriv_horiz[3]; // needs 3 elements
    float64_t intersectpoint2poly_deriv_vert[3];

    // Evaluate derivatives of 2D intersection point
    // be sure to allocate 3 numbers of space
    // for intersectpoint2poly & friends as we will use it
    // in projective form
    multmat23vec(&surfacearray[surfaceid].inplanemats[6*facetid],horiz_ray_intersect_shift_deriv,intersectpoint2poly_deriv_horiz);
    multmat23vec(&surfacearray[surfaceid].inplanemats[6*facetid],vert_ray_intersect_shift_deriv,intersectpoint2poly_deriv_vert);
    intersectpoint2poly_deriv_horiz[2]=0.0; // derivatives are vectors in 2-space, intepreted in a projective space
    intersectpoint2poly_deriv_vert[2]=0.0;

    // Derivatives in (u,v)
    multmat23vec(&surfacearray[surfaceid].inplane2texcoords[6*facetid],intersectpoint2poly_deriv_horiz,intersectpoint2uvcanon_deriv_horiz); // use intersectpoint2tex_deriv_horiz once we have a texcoords->uvcanon coords mapping )
    multmat23vec(&surfacearray[surfaceid].inplane2texcoords[6*facetid],intersectpoint2poly_deriv_vert,intersectpoint2uvcanon_deriv_vert); // use intersectpoint2tex_deriv_vert once we have a texcoords->uvcanon coords mapping )
  }

  // Now we have uv parameterization coordinates
  // intersectpoint2uvcanon as well as derivatives
  // of those coordinates representing motion of
  // one pixel in the projected image to the right (horiz)
  // and one pixel down (vert).
}

/* Two-phase z-buffer evaluation over all surfaces for a src_nx x src_ny
   source image.
   Phase 1 (parallel over surface*row): casts a ray through every source pixel
   and fills imagedata_zbuffer / imagedata_surfaceid / imagedata_facetid with
   the nearest intersection via find_ray_intersections().
   Phase 2 (parallel over rows): for every pixel that hit the object, computes
   intersect-shift derivatives, angle-of-incidence weighting, and (u,v)
   coordinates; pixels with no hit get NaN / zero-weighting markers. */
static IPOPS_INLINE void evaluate_zbuffer(struct projectionsurface *surfacearray, size_t src_nx, size_t src_ny, size_t numsurfaces, float64_t *cam_mtx_Ut, float64_t *cam_mtx_Vtsubinv, float64_t s1,float64_t s2, float64_t V31,float64_t V32, float32_t *imagedata_zbuffer, uint32_t *imagedata_surfaceid, uint32_t *imagedata_facetid, float32_t *imagedata_angleofincidence, float32_t *imagedata_angleofincidence_weighting, float32_t *imagedata_horiz_ray_intersect_shift_deriv_cam_z, float32_t *imagedata_vert_ray_intersect_shift_deriv_cam_z, float32_t *imagedata_uvcoords /* in texture coordinates [0,1], not physical scaled coordinates */)
{
  int64_t surfaceycnt; // must be signed (e.g. not size_t) for MSVC compatibility

#pragma omp parallel default(shared) private(surfaceycnt) // ,src_ny,numsurfaces)
  {
#pragma omp for
    for (surfaceycnt=0;surfaceycnt < src_ny*numsurfaces;surfaceycnt++) {
      size_t ycnt,surfacecnt,xcnt;
      float64_t focalpointobj[4];
      float64_t rayvecobj[4]; // rayvec in object coordinates
      float64_t rayvecfactor;
      int trace=FALSE;
      size_t firstidx;
      //float64_t rayvecfactor;

      /* flattened loop index decodes to (surface, image row) */
      ycnt=surfaceycnt % src_ny; // numsurfaces;
      surfacecnt=surfaceycnt / src_ny; // numsurfaces;

      // calculation of focalpointobj: Coordinates of focal point in object coordinates for this surface.
      // since we are doing this OpenCV style, negate the 2nd and 3rd columns... see imageprojectionmodel.py
      // (except we don't actually do this because we are just accessing the 4th column)
      focalpointobj[0]=surfacearray[surfacecnt].tosurfaceframe[0*4 + 3];
      focalpointobj[1]=surfacearray[surfacecnt].tosurfaceframe[1*4 + 3];
      focalpointobj[2]=surfacearray[surfacecnt].tosurfaceframe[2*4 + 3];
      focalpointobj[3]=surfacearray[surfacecnt].tosurfaceframe[3*4 + 3];
      normalize_wcoord4(focalpointobj);

      for (xcnt=0; xcnt < src_nx; xcnt++) {
	// Go through each source image pixel,
	// project it through to surface. If it
	// intersects closer, mark the z-buffer
	// and surface ID. This will give us a map
	// of the source image for each pixel, which surface
	// it maps onto.

	// NOTE: These are recalculated later in project_image... formulas should be THE SAME!!!
	basic_raycalcs(surfacearray, surfacecnt, cam_mtx_Ut, cam_mtx_Vtsubinv, s1,s2,V31,V32, xcnt,ycnt, /* outputs ... */ NULL, /* also a flag for whether we need derivs */ NULL, NULL, NULL, rayvecobj, /* 4-vector */ &rayvecfactor);

	/* dead debugging branch (FALSE &&): brute-force scan of all polygons
	   for one specific pixel, enabling trace output */
	if (FALSE && ycnt==237 && xcnt==352) {
	  int polynum;
	  double dist;
	  int hitcnt=0;
	  struct projectionsurface *surf;
	  /* debugging */
	  fprintf(stderr,"y=%d/%d, x=%d/%d\n",(int)ycnt,(int)src_ny,(int)xcnt,(int)src_nx);
	  trace=TRUE;

	  surf=&surfacearray[surfacecnt];
	  for (polynum = 0; polynum < surf->npolys;polynum++) {
	    firstidx=surf->vertexidx_indices[polynum];
	    dist = ray_to_plane_distance(focalpointobj,rayvecobj, &surf->vertices[3*surf->vertexidx[firstidx + 0]], &surf->facetnormals[polynum*3]);
	    if (ray_intersects_polygon(surf->vertices,&surf->vertexidx[firstidx],surf->numvertices[polynum],surf->facetnormals+3*polynum,surf->refpoints+3*polynum,surf->inplanemats+6*polynum,focalpointobj,rayvecobj,surf->maxradius[polynum],dist,TRUE,trace && FALSE)) {
	      fprintf(stderr,"ray intersects polygon %d; dist=%f\n",(int)polynum,dist);
	      hitcnt++;
	    }
	  }
	  if (!hitcnt) {
	    fprintf(stderr,"ray did not intersect any polygons\n");
	  }
	} else {
	  trace=FALSE;
	}

	// find_ray_intersections() should fill out the z-buffer, surface id, and facet id arrays for the closest intersection
	find_ray_intersections(&surfacearray[surfacecnt],surfacecnt,0,focalpointobj,rayvecobj,imagedata_zbuffer + src_nx*ycnt + xcnt,imagedata_surfaceid + src_nx*ycnt + xcnt,imagedata_facetid + src_nx*ycnt + xcnt,trace);
      }
    }
  }

  /* Next phase: evaluate derivatives, etc. */
#pragma omp parallel default(shared)
  {
    int64_t ycnt; // must be signed (e.g. not size_t) for MSVC compatibility
    /* IEEE-754 binary32 quiet-NaN bit pattern, copied in via memcpy */
    static const uint32_t NaNconst=0x7fc00000;
    float32_t NaNval;

    memcpy(&NaNval,&NaNconst,sizeof(NaNval));

#pragma omp for private(ycnt) // ,src_ny)
    for (ycnt=0;ycnt < src_ny;ycnt++) {
      int64_t xcnt; // must be signed (e.g. not size_t) for MSVC compatibility
      float64_t focalpointobj[4];
      double dc1_du0,dc2_du0,dc1_dv0,dc2_dv0;
      float64_t rayvecobj[4]; // rayvec in object coordinates
      float64_t rayvecfactor;
      float64_t dist;
      float64_t horiz_ray_intersect_shift_deriv[3];
      float64_t horiz_ray_intersect_shift_deriv_cam[3];
      float64_t vert_ray_intersect_shift_deriv[3];
      float64_t vert_ray_intersect_shift_deriv_cam[3];
      float64_t intersectpoint2uvcanon[3]; // intersection point in texture coordinates (not scaled UV coordinates
      float64_t intersectpoint2uvcanon_deriv_horiz[3];
      float64_t intersectpoint2uvcanon_deriv_vert[3];
      float64_t angle_of_incidence,angle_of_incidence_factor;
      uint32_t surfaceid,facetid;

      for (xcnt=0; xcnt < src_nx; xcnt++) {
	// Go through each source image pixel, find derivatives and where it projects in (u,v) space

	// Dist was calculated by find_ray_intersections(), above
	dist = imagedata_zbuffer[src_nx*ycnt + xcnt];
	if (!isinf(dist)) {
	  /* if we actually got a ray intersection above */
	  surfaceid=imagedata_surfaceid[ycnt*src_nx + xcnt];
	  facetid=imagedata_facetid[ycnt*src_nx + xcnt];

	  // calculation of focalpointobj: Coordinates of focal point in object coordinates for this surface.
	  // since we are doing this OpenCV style, negate the 2nd and 3rd columns... see imageprojectionmodel.py
	  // (except we don't need to do this because we are just extracting the 4th column)
	  focalpointobj[0]=surfacearray[surfaceid].tosurfaceframe[0*4 + 3];
	  focalpointobj[1]=surfacearray[surfaceid].tosurfaceframe[1*4 + 3];
	  focalpointobj[2]=surfacearray[surfaceid].tosurfaceframe[2*4 + 3];
	  focalpointobj[3]=surfacearray[surfaceid].tosurfaceframe[3*4 + 3];
	  normalize_wcoord4(focalpointobj);

	  basic_raycalcs(surfacearray, surfaceid, cam_mtx_Ut, cam_mtx_Vtsubinv, s1,s2,V31,V32, xcnt,ycnt, /* outputs ... */ &dc1_du0, /* also a flag for whether we need derivs */ &dc2_du0, &dc1_dv0, &dc2_dv0, rayvecobj, /* 4-vector */ &rayvecfactor);

	  // Evaluate derivatives... We convert them to object coordinates, evaluate the shifts
	  // in object coordinates, then shift back.
	  // (it might be marginally more efficient to evaluate the derivatives in camera coordinates)
	  deriv_raycalcs(surfacearray, focalpointobj, rayvecobj,rayvecfactor, surfaceid, facetid, dc1_du0,dc2_du0,dc1_dv0,dc2_dv0, /* outputs */ horiz_ray_intersect_shift_deriv, vert_ray_intersect_shift_deriv);

	  // Determine intersect position shift in camera coordinates with respect to a unit horizontal pixel shift
	  multmatvec3(surfacearray[surfaceid].vecfromsurfaceframe,horiz_ray_intersect_shift_deriv,horiz_ray_intersect_shift_deriv_cam);

	  /* evaluate left and right predicted z values... add z coordinate of derivative to this pixel's position */
	  //imagedata_projectzright[src_nx*ycnt + xcnt] = dist + 1.0*horiz_ray_intersect_shift_deriv_cam[2];
	  //imagedata_projectzleft[src_nx*ycnt + xcnt] = dist - 1.0*horiz_ray_intersect_shift_deriv_cam[2];
	  imagedata_horiz_ray_intersect_shift_deriv_cam_z[src_nx*ycnt + xcnt]=1.0*horiz_ray_intersect_shift_deriv_cam[2];

	  // Determine intersect position shift in camera coordinates with respect to a unit vertical pixel shift
	  multmatvec3(surfacearray[surfaceid].vecfromsurfaceframe,vert_ray_intersect_shift_deriv,vert_ray_intersect_shift_deriv_cam);

	  /* evaluate up and down predicted z values...
add z coordinate of derivative to this pixel's position */ /* NOTE: We are assuming y ordering is like a raster scan (down is increasing y) */ //imagedata_projectzdown[src_nx*ycnt + xcnt] = dist + 1.0*vert_ray_intersect_shift_deriv_cam[2]; //imagedata_projectzup[src_nx*ycnt + xcnt] = dist - 1.0*vert_ray_intersect_shift_deriv_cam[2]; imagedata_vert_ray_intersect_shift_deriv_cam_z[src_nx*ycnt + xcnt]=vert_ray_intersect_shift_deriv_cam[2]; // So horiz_ray_intersect_shift_deriv // and vert_ray_intersect_shift_deriv // are vectors indicating what happens when you move // one pixel to the right and one pixel down // on the image being projected /* determine angle of incidence, for weighting use */ angle_of_incidence = acos(fabs(dotvecvec3(rayvecobj,&surfacearray[surfaceid].facetnormals[imagedata_facetid[src_nx*ycnt + xcnt]*3]))); // rayvecobj and facetnormals should be unit vectors // Ignore anything at extreme angles of incidence if (angle_of_incidence > 3*M_PI/8) { angle_of_incidence_factor=0.0; } else { // Define factor by which we de-emphasize data at larger angles of incidence angle_of_incidence_factor = cos(angle_of_incidence * (M_PI/2)/(3*M_PI/8)); } imagedata_angleofincidence_weighting[src_nx*ycnt + xcnt] = angle_of_incidence_factor; if (imagedata_angleofincidence) { imagedata_angleofincidence[src_nx*ycnt + xcnt] = angle_of_incidence; } find_intersect_uv(surfacearray, focalpointobj, rayvecobj, surfaceid, imagedata_facetid[src_nx*ycnt + xcnt], dist, horiz_ray_intersect_shift_deriv, // only needed if calculating derivatives vert_ray_intersect_shift_deriv, // only needed if calculating derivatives /* outputs */ intersectpoint2uvcanon, // intersection point in texture coordinates (not scaled UV coordinates intersectpoint2uvcanon_deriv_horiz, // also flag for whether to calc derivatives intersectpoint2uvcanon_deriv_vert); imagedata_uvcoords[src_nx*ycnt*2 + xcnt*2]=intersectpoint2uvcanon[0]; // Store u coordinate imagedata_uvcoords[src_nx*ycnt*2 + xcnt*2 + 
1]=intersectpoint2uvcanon[1]; // store v coordinate if (surfacearray[surfaceid].angle_of_incidence_factor_buf_uv) { /* if buffer to store angle_of_incidence_factor in uv coordinate frame is provided... */ /* use projecttoimgbuf() to map out projection validity region */ /* we provide angle_of_incidence_factor as the pixel value so that imgbuf ends up with the sum of projection weighting * angle of incidence factor whereas validitybuf ends up with the sum of the angle of incidence factors */ projecttoimgbuf(angle_of_incidence_factor,1.0,intersectpoint2uvcanon,intersectpoint2uvcanon_deriv_horiz,intersectpoint2uvcanon_deriv_vert,surfacearray[surfaceid].angle_of_incidence_factor_buf_uv,NULL,surfacearray[surfaceid].validitybuf,0,surfacearray[surfaceid].nx,surfacearray[surfaceid].ny,MIN_RADIUS_UV_PIXELS,MIN_RADIUS_SRC_PIXELS,BANDWIDTH_FRACTION); } } else { /* No intersection found at all */ imagedata_horiz_ray_intersect_shift_deriv_cam_z[src_nx*ycnt + xcnt]=NaNval; imagedata_vert_ray_intersect_shift_deriv_cam_z[src_nx*ycnt + xcnt]=NaNval; imagedata_angleofincidence_weighting[src_nx*ycnt + xcnt] = 0.0; imagedata_angleofincidence[src_nx*ycnt + xcnt]=NaNval; imagedata_uvcoords[src_nx*ycnt*2 + xcnt*2]=NaNval; imagedata_uvcoords[src_nx*ycnt*2 + xcnt*2 + 1]=NaNval; } } } } } static IPOPS_INLINE void project_image(struct projectionsurface *surfacearray, float32_t *framedata, size_t framecnt, size_t src_nx, size_t src_ny, float64_t *cam_mtx_Ut, float64_t *cam_mtx_Vtsubinv, float64_t s1,float64_t s2, float64_t V31,float64_t V32, float32_t *imagedata_zbuffer, uint32_t *imagedata_surfaceid, uint32_t *imagedata_facetid, float32_t *imagedata_weighting,int debug) { int64_t ycnt; // must be signed (e.g. not size_t) for MSVC compatibility //omp_lock_t mutex; //omp_init_lock(&mutex); { // ***!!!! 
 If you allow this loop to be parallel -- either by
    // enabling OpenMP or by not restricting it to num_threads(1)
    // then (even if you use the commented-out lock to restrain more
    // than one thread from executing in parallel), you get
    // slightly different (!!!) results from the different threads,
    // which are presumably running on different CPU cores.
    // The difference seems to be on the order of machine precision
    // (i.e. O(1e-7) for single precision)
    // See demos/testregistration_projectionmismatch.py for
    // testcase
#pragma omp parallel for default(shared) private(ycnt) // num_threads(1)
    for (ycnt=0;ycnt < src_ny;ycnt++) {
      //omp_set_lock(&mutex);
      size_t xcnt;

      for (xcnt=0; xcnt < src_nx; xcnt++) {
	// Only pixels with a finite z-buffer entry hit a surface;
	// infinite entries were left untouched by the z-buffer pass.
	if (!IPOPS_ISINF(imagedata_zbuffer[ycnt*src_nx + xcnt])) {
	  int32_t surfaceid,facetid;
	  float64_t focalpointobj[4];
	  double dc1_du0,dc2_du0,dc1_dv0,dc2_dv0;
	  float64_t rayvecobj[4]; // rayvec in object coordinates
	  float64_t rayvecfactor;
	  float64_t dist;
	  float64_t horiz_ray_intersect_shift_deriv[3];
	  float64_t vert_ray_intersect_shift_deriv[3];
	  float64_t intersectpoint2uvcanon[3]; // intersection point in texture coordinates (not scaled UV coordinates
	  float64_t intersectpoint2uvcanon_deriv_horiz[3];
	  float64_t intersectpoint2uvcanon_deriv_vert[3];
	  //float64_t angle_of_incidence;

	  /* finite z-buffer means this point maps onto the object */
	  surfaceid=imagedata_surfaceid[ycnt*src_nx + xcnt];
	  facetid=imagedata_facetid[ycnt*src_nx + xcnt];

	  /* identify the focal point in object coordinates, as in evaluate_zbuffer */
	  // calculation of focalpointobj: Coordinates of focal point in object coordinates for this surface.
	  // since we are doing this OpenCV style, negate the 2nd and 3rd columns... see imageprojectionmodel.py
	  // (except we don't actually do this because we are just extracting the 4th column)
	  focalpointobj[0]=surfacearray[surfaceid].tosurfaceframe[0*4 + 3];
	  focalpointobj[1]=surfacearray[surfaceid].tosurfaceframe[1*4 + 3];
	  focalpointobj[2]=surfacearray[surfaceid].tosurfaceframe[2*4 + 3];
	  focalpointobj[3]=surfacearray[surfaceid].tosurfaceframe[3*4 + 3];
	  normalize_wcoord4(focalpointobj); // rescale so w-component is 1

	  // Then we go back through each pixel and
	  // perform the mapping, stamping down sinc-interpolation
	  // (or similar lowpass interpolation)
	  // patches onto the parameterization output image.
	  // NOTE: These calculations are identical
	  // to the pre-calculations used to do
	  // z-buffering in evaluate_zbuffer, above!
	  basic_raycalcs(surfacearray, surfaceid, cam_mtx_Ut, cam_mtx_Vtsubinv, s1,s2,V31,V32, xcnt,ycnt,
			 /* outputs ... */
			 &dc1_du0, /* also a flag for whether we need derivs */
			 &dc2_du0,
			 &dc1_dv0,
			 &dc2_dv0,
			 rayvecobj, /* 4-vector */
			 &rayvecfactor);

	  // Same for derivatives...
	  deriv_raycalcs(surfacearray, focalpointobj, rayvecobj,rayvecfactor, surfaceid, facetid, dc1_du0,dc2_du0,dc1_dv0,dc2_dv0,
			 /* outputs */
			 horiz_ray_intersect_shift_deriv,
			 vert_ray_intersect_shift_deriv);

	  // So horiz_ray_intersect_shift_deriv
	  // and vert_ray_intersect_shift_deriv
	  // are vectors indicating what happens when you move
	  // one pixel to the right and one pixel down
	  // on the image being projected
	  //
	  // Now we have to figure out where this data
	  // actually goes... i.e. map onto texture coordinates
	  //
	  //
	  // Find intersection point, as in evaluate_zbuffer(), above
	  // firstidx=surfacearray[surfaceid].vertexidx_indices[facetid];
	  //
	  //dist = ray_to_plane_distance(focalpointobj,rayvecobj,&surfacearray[surfaceid].vertices[3*surfacearray[surfaceid].vertexidx[firstidx + 0]], &surfacearray[surfaceid].facetnormals[facetid*3]);
	  //assert(dist==imagedata_zbuffer[src_nx*ycnt + xcnt]); // Should get exactly same result as previous calculation
	  //angle_of_incidence = acos(fabs(dotvecvec3(rayvecobj,&surfacearray[surfaceid].facetnormals[facetid*3]))); // rayvecobj and facetnormals should be unit vectors

	  // reuse the distance already computed by the z-buffer pass
	  dist=imagedata_zbuffer[src_nx*ycnt + xcnt];

	  // be sure to allocate 3 numbers of space
	  // for intersectpoint2uvcanon & friends as we will use it
	  // in projective form

	  // find intersectpoint
	  find_intersect_uv(surfacearray, focalpointobj, rayvecobj, surfaceid,facetid, dist,
			    horiz_ray_intersect_shift_deriv, // only needed if calculating derivatives
			    vert_ray_intersect_shift_deriv, // only needed if calculating derivatives
			    /* outputs */
			    intersectpoint2uvcanon, // intersection point in texture coordinates (not scaled UV coordinates
			    intersectpoint2uvcanon_deriv_horiz, // also flag for whether to calc derivatives
			    intersectpoint2uvcanon_deriv_vert);

	  // Now we have uv parameterization coordinates
	  // intersectpoint2uvcanon as well as derivatives
	  // of those coordinates representing motion of
	  // one pixel in the projected image to the right (horiz)
	  // and one pixel down (vert).

	  // !!!*** Evaluate intrinsic texture coords -> canonical
	  // texture coords transform and derivatives, here
	  // once implemented ***!!!

	  // Evaluate within 1.5px by bandlimited splat
	  projecttoimgbuf(framedata[src_nx*ycnt+xcnt],imagedata_weighting[src_nx*ycnt + xcnt],intersectpoint2uvcanon,intersectpoint2uvcanon_deriv_horiz,intersectpoint2uvcanon_deriv_vert,surfacearray[surfaceid].imgbuf,surfacearray[surfaceid].weightingbuf,surfacearray[surfaceid].validitybuf,framecnt,surfacearray[surfaceid].nx,surfacearray[surfaceid].ny,MIN_RADIUS_UV_PIXELS,MIN_RADIUS_SRC_PIXELS,BANDWIDTH_FRACTION);

	  // ... Check if answer variability is due to floating point mode differences (nope!)
	  //femode_t femode={0};
	  //fegetmode(&femode);
	  //printf("__control_word=0x%4.4x; __glibc_reserved=0x%4.4x; __mxcsr=0x%8.8x\n",(unsigned)femode.__control_word,(unsigned)femode.__glibc_reserved,(unsigned)femode.__mxcsr);

	  // Projecttoimgbuf
	  // accumulates not just the patches, but a validity
	  // factor representing how much input there was in
	  // each destination pixel. The accumulated image
	  // can then be normalized by the validity factor
	  // to get a meaningful estimate (and pixels with
	  // validity factor << 1 should be set to NaN)
	}
      }
      //omp_unset_lock(&mutex);
    }
    //#pragma omp barrier

    /* Now we can use the validity map to normalize the generated image */
    /* Actually, no we can't because we might have several different image
       sources being collected together, and we only want to normalize at the very end.
 So it wants to be a different function */
    /*
    #pragma omp for
    for (ycnt=0;ycnt < src_ny;ycnt++) {
      size_t xcnt;
      for (xcnt=0; xcnt < src_nx; xcnt++) {

      }
    }*/
  }
}

/* evaluate_zbuffer_orthographic: z-buffer pass for an orthographic "camera".
   For each pixel of the src_nx x src_ny source image and each of the
   numsurfaces surfaces, cast a +z-direction ray from the pixel's camera-plane
   location through the surface mesh and record the closest intersection into
   imagedata_zbuffer/imagedata_surfaceid/imagedata_facetid. A second pass then
   evaluates, per intersected pixel, the angle of incidence (plus a weighting
   factor that suppresses grazing incidence) and the (u,v) texture coordinates
   of the hit; imagedata_angleofincidence, imagedata_angleofincidence_weighting
   and imagedata_uvcoords may each be NULL to skip that output. */
static IPOPS_INLINE void evaluate_zbuffer_orthographic(struct projectionsurface *surfacearray,
						       size_t src_nx, size_t src_ny,
						       size_t numsurfaces,
						       float64_t *orthographic_proj_matrix,
						       float32_t *imagedata_zbuffer,
						       uint32_t *imagedata_surfaceid,
						       uint32_t *imagedata_facetid,
						       float32_t *imagedata_angleofincidence,
						       float32_t *imagedata_angleofincidence_weighting,
						       float32_t *imagedata_uvcoords /* in texture coordinates [0,1], not physical scaled coordinates */)
{
  int64_t surfaceycnt; // must be signed (e.g. not size_t) for MSVC compatibility

  /* orthographic_proj_matrix is defined as follows
       [ u ]   [ fx  0  cu ][ x ]
       [ v ] = [  0 fy  cv ][ y ]
       [ 1 ]
     where x, y are relative to "camera location" (x,y,z) = (0,0,0),
     camera looking in +z direction, and u,v are image pixel coordinates:
       fx = pixels/mm in x
       fy = pixels/mm in y
       cu = center u (in pixels)
       cv = center v (in pixels)
     ... z is NOT PASSED to this matrix ...
     therefore
       u = fx x + cu          v = fy y + cv
       u - cu = fx x          v - cv = fy y
       x = ( u - cu )/fx      y = ( v - cv )/fy
     Also, for a (u,v) image:
       Step1=1.0/fx   IniVal1=-cu/fx
       Step2=1.0/fy   IniVal2=-cv/fy
     or equivalently:
       fx=1.0/Step1   cu=-IniVal1*fx
       fy=1.0/Step2   cv=-IniVal2*fy
  */
#pragma omp parallel default(shared) private(surfaceycnt) //,src_ny,numsurfaces)
  {
#pragma omp for
    for (surfaceycnt=0;surfaceycnt < src_ny*numsurfaces;surfaceycnt++) {
      size_t ycnt,surfacecnt,xcnt;
      float64_t rayvec[4];
      float64_t rayvecobj[4]; // rayvec in object coordinates
      float64_t raysrcloc[4],raysrclocobj[4];
      int trace=FALSE;
      size_t firstidx;
      //float64_t rayvecfactor;

      // unpack the flattened (surface,row) loop index
      ycnt=surfaceycnt % src_ny; // numsurfaces;
      surfacecnt=surfaceycnt / src_ny; // numsurfaces;

      for (xcnt=0; xcnt < src_nx; xcnt++) {
	// Go through each source image pixel,
	// project it through to surface. If it
	// intersects closer, mark the z-buffer
	// and surface ID.
This will give us a map // of the source image for each pixel, which surface // it maps onto. // NOTE: These are recalculated later in project_image... formulas should be THE SAME!!! // rayvec in camera coordinates rayvec[0]=0; rayvec[1]=0; rayvec[2]=1.0; rayvec[3]=0.0; // "vector" has 0 w-component /* apply correction for camera matrix being in OpenCV coords, but our transforms being in OpenGL coords */ /* We negate rayvec[1] and rayvec[2] rather than messing with tosurfaceframe. */ rayvec[1]=-rayvec[1]; rayvec[2]=-rayvec[2]; // convert to object coordinates multmatvec4(surfacearray[surfacecnt].tosurfaceframe,rayvec,rayvecobj); // no need to normalize since a unit vector to begin with (rayvecfactor=1) // calculation of raysrclocobj: Coordinates of ray source in object coordinates: // since we are doing this OpenCV style, negate the 2nd and 3rd columns... see imageprojectionmodel.py // (except we don't actually do this (???)) // ray source location in camera coords raysrcloc[0]=(xcnt - orthographic_proj_matrix[2])/orthographic_proj_matrix[0]; // (u-cu)/fx raysrcloc[1]=-(ycnt - orthographic_proj_matrix[5])/orthographic_proj_matrix[4]; // (v-cv)/fy (negate because tosurfaceframe is in opengl coords) raysrcloc[2]=-0.0; // negate because tosurfaceframe is in OpenGL coords raysrcloc[3]=1.0; /* this is a point */ multmatvec4(surfacearray[surfacecnt].tosurfaceframe,raysrcloc,raysrclocobj); normalize_wcoord4(raysrclocobj); if (FALSE && ycnt==237 && xcnt==352) { int polynum; double dist; int hitcnt=0; struct projectionsurface *surf; /* debugging */ fprintf(stderr,"y=%d/%d, x=%d/%d\n",(int)ycnt,(int)src_ny,(int)xcnt,(int)src_nx); trace=TRUE; surf=&surfacearray[surfacecnt]; for (polynum = 0; polynum < surf->npolys;polynum++) { firstidx=surf->vertexidx_indices[polynum]; dist = ray_to_plane_distance(raysrclocobj,rayvecobj, &surf->vertices[3*surf->vertexidx[firstidx + 0]], &surf->facetnormals[polynum*3]); if 
(ray_intersects_polygon(surf->vertices,&surf->vertexidx[firstidx],surf->numvertices[polynum],surf->facetnormals+3*polynum,surf->refpoints+3*polynum,surf->inplanemats+6*polynum,raysrclocobj,rayvecobj,surf->maxradius[polynum],dist,TRUE,trace && FALSE)) { fprintf(stderr,"ray intersects polygon %d; dist=%f\n",(int)polynum,dist); hitcnt++; } } if (!hitcnt) { fprintf(stderr,"ray did not intersect any polygons\n"); } } else { trace=FALSE; } // find_ray_intersections() should fill out the z-buffer, surface id, and facet id arrays for the closest intersection find_ray_intersections(&surfacearray[surfacecnt],surfacecnt,0,raysrclocobj,rayvecobj,imagedata_zbuffer + src_nx*ycnt + xcnt,imagedata_surfaceid + src_nx*ycnt + xcnt,imagedata_facetid + src_nx*ycnt + xcnt,trace); } } } /* Next phase: evaluate derivatives, etc. */ #pragma omp parallel default(shared) // private(src_ny) { int64_t ycnt; // must be signed (e.g. not size_t) for MSVC compatibility static const uint32_t NaNconst=0x7fc00000; float32_t NaNval; memcpy(&NaNval,&NaNconst,sizeof(NaNval)); #pragma omp for private(ycnt) //src_ny for (ycnt=0;ycnt < src_ny;ycnt++) { int64_t xcnt; // must be signed (e.g. 
not size_t) for MSVC compatibility float64_t rayvec[4]; float64_t rayvecobj[4]; // rayvec in object coordinates float64_t raysrcloc[4],raysrclocobj[4]; float64_t dist; float64_t horiz_raysrc_shift[3]; float64_t vert_raysrc_shift[3]; float64_t horiz_ray_intersect_shift_deriv[3]; float64_t vert_ray_intersect_shift_deriv[3]; float64_t intersectpoint2uvcanon[3]; // intersection point in texture coordinates (not scaled UV coordinates float64_t intersectpoint2uvcanon_deriv_horiz[3]; float64_t intersectpoint2uvcanon_deriv_vert[3]; float64_t angle_of_incidence,angle_of_incidence_factor; uint32_t surfaceid,facetid; for (xcnt=0; xcnt < src_nx; xcnt++) { // Go through each source image pixel, find derivatives and where it projects in (u,v) space // Dist was calculated by find_ray_intersections(), above dist = imagedata_zbuffer[src_nx*ycnt + xcnt]; if (!isinf(dist)) { /* if we actually got a ray intersection above */ surfaceid=imagedata_surfaceid[ycnt*src_nx + xcnt]; facetid=imagedata_facetid[ycnt*src_nx + xcnt]; // rayvec in camera coordinates rayvec[0]=0; rayvec[1]=0; rayvec[2]=1.0; rayvec[3]=0.0; // "vector" has 0 w-component /* apply correction for camera matrix being in OpenCV coords, but our transforms being in OpenGL coords */ /* We negate rayvec[1] and rayvec[2] rather than messing with tosurfaceframe. */ rayvec[1]=-rayvec[1]; rayvec[2]=-rayvec[2]; // convert to object coordinates multmatvec4(surfacearray[surfaceid].tosurfaceframe,rayvec,rayvecobj); // no need to normalize since a unit vector to begin with (rayvecfactor=1) // calculation of raysrclocobj: Coordinates of ray source in object coordinates: // since we are doing this OpenCV style, negate the 2nd and 3rd columns... 
see imageprojectionmodel.py // (except we don't actually do this (???)) // ray source location in camera coords raysrcloc[0]=(xcnt - orthographic_proj_matrix[2])/orthographic_proj_matrix[0]; // (u-cu)/fx raysrcloc[1]=-(ycnt - orthographic_proj_matrix[5])/orthographic_proj_matrix[4]; // (v-cv)/fy (negate because tosurfaceframe is in opengl coords) raysrcloc[2]=-0.0; // negate because tosurfaceframe is in OpenGL coords raysrcloc[3]=1.0; /* this is a point */ multmatvec4(surfacearray[surfaceid].tosurfaceframe,raysrcloc,raysrclocobj); normalize_wcoord4(raysrclocobj); // horiz_ray_intersect_shift_deriv it the change of the intersection coordinates per one-pixel // horizontal shift // The ray itself stays in the same direction but shifts laterally in camera coordinates by [ 1/fx, 0, 0 ] // Evaluate derivatives... We convert them to object coordinates, evaluate the shifts // in object coordinates, then shift back. // (it might be marginally more efficient to evaluate the derivatives in camera coordinates) //horiz_raysrc_shift = surfacearray[surfaceid].tosurfaceframe * [ 1/fx ; 0 ; 0 ; 0] // so extract 1st column of tosurfaceframe and divide by fx horiz_raysrc_shift[0]=surfacearray[surfaceid].tosurfaceframe[0*4 + 0]/orthographic_proj_matrix[0]; // /fx horiz_raysrc_shift[1]=surfacearray[surfaceid].tosurfaceframe[1*4 + 0]/orthographic_proj_matrix[0]; // /fx horiz_raysrc_shift[2]=surfacearray[surfaceid].tosurfaceframe[2*4 + 0]/orthographic_proj_matrix[0]; // /fx // vertical shift // The ray itself stays in the same direction but shifts laterally in camera coordinates by [ 0, 1/fy, 0 ] // because tosurfaceframe in opengl coordinates negate 2nd and 3rd column (this is 2nd) vert_raysrc_shift[0]=-surfacearray[surfaceid].tosurfaceframe[0*4 + 1]/orthographic_proj_matrix[4]; // /fy vert_raysrc_shift[1]=-surfacearray[surfaceid].tosurfaceframe[1*4 + 1]/orthographic_proj_matrix[4]; // /fy vert_raysrc_shift[2]=-surfacearray[surfaceid].tosurfaceframe[2*4 + 1]/orthographic_proj_matrix[4]; 
// /fy orthographic_deriv_raycalcs(surfacearray, raysrclocobj, rayvecobj, surfaceid, facetid, horiz_raysrc_shift, vert_raysrc_shift, /* outputs */ horiz_ray_intersect_shift_deriv, vert_ray_intersect_shift_deriv); //// Determine intersect position shift in camera coordinates with respect to a unit horizontal pixel shift //multmatvec3(surfacearray[surfaceid].vecfromsurfaceframe,horiz_ray_intersect_shift_deriv,horiz_ray_intersect_shift_deriv_cam); /* evaluate left and right predicted z values... add z coordinate of derivative to this pixel's position */ //imagedata_projectzright[src_nx*ycnt + xcnt] = dist + 1.0*horiz_ray_intersect_shift_deriv_cam[2]; //imagedata_projectzleft[src_nx*ycnt + xcnt] = dist - 1.0*horiz_ray_intersect_shift_deriv_cam[2]; //imagedata_horiz_ray_intersect_shift_deriv_cam_z[src_nx*ycnt + xcnt]=1.0*horiz_ray_intersect_shift_deriv_cam[2]; // Determine intersect position shift in camera coordinates with respect to a unit vertical pixel shift //multmatvec3(surfacearray[surfaceid].vecfromsurfaceframe,vert_ray_intersect_shift_deriv,vert_ray_intersect_shift_deriv_cam); /* evaluate up and down predicted z values... 
add z coordinate of derivative to this pixel's position */ /* NOTE: We are assuming y ordering is like a raster scan (down is increasing y) */ //imagedata_projectzdown[src_nx*ycnt + xcnt] = dist + 1.0*vert_ray_intersect_shift_deriv_cam[2]; //imagedata_projectzup[src_nx*ycnt + xcnt] = dist - 1.0*vert_ray_intersect_shift_deriv_cam[2]; //imagedata_vert_ray_intersect_shift_deriv_cam_z[src_nx*ycnt + xcnt]=vert_ray_intersect_shift_deriv_cam[2]; // So horiz_ray_intersect_shift_deriv // and vert_ray_intersect_shift_deriv // are vectors indicating what happens when you move // one pixel to the right and one pixel down // on the image being projected /* determine angle of incidence, for weighting use */ angle_of_incidence = acos(fabs(dotvecvec3(rayvecobj,&surfacearray[surfaceid].facetnormals[imagedata_facetid[src_nx*ycnt + xcnt]*3]))); // rayvecobj and facetnormals should be unit vectors // Ignore anything at extreme angles of incidence if (angle_of_incidence > 3*M_PI/8) { angle_of_incidence_factor=0.0; } else { // Define factor by which we de-emphasize data at larger angles of incidence angle_of_incidence_factor = cos(angle_of_incidence * (M_PI/2)/(3*M_PI/8)); } if (imagedata_angleofincidence_weighting) { imagedata_angleofincidence_weighting[src_nx*ycnt + xcnt] = angle_of_incidence_factor; } if (imagedata_angleofincidence) { imagedata_angleofincidence[src_nx*ycnt + xcnt] = angle_of_incidence; } if (imagedata_uvcoords || surfacearray[surfaceid].angle_of_incidence_factor_buf_uv) { find_intersect_uv(surfacearray, raysrclocobj, rayvecobj, surfaceid, imagedata_facetid[src_nx*ycnt + xcnt], dist, horiz_ray_intersect_shift_deriv, // only needed if calculating derivatives vert_ray_intersect_shift_deriv, // only needed if calculating derivatives /* outputs */ intersectpoint2uvcanon, // intersection point in texture coordinates (not scaled UV coordinates intersectpoint2uvcanon_deriv_horiz, // also flag for whether to calc derivatives intersectpoint2uvcanon_deriv_vert); if 
(imagedata_uvcoords) { imagedata_uvcoords[src_nx*ycnt*2 + xcnt*2]=intersectpoint2uvcanon[0]; // Store u coordinate imagedata_uvcoords[src_nx*ycnt*2 + xcnt*2 + 1]=intersectpoint2uvcanon[1]; // store v coordinate } if (surfacearray[surfaceid].angle_of_incidence_factor_buf_uv) { /* if buffer to store angle_of_incidence_factor in uv coordinate frame is provided... */ /* use projecttoimgbuf() to map out projection validity region */ /* we provide angle_of_incidence_factor as the pixel value so that imgbuf ends up with the sum of projection weighting * angle of incidence factor whereas validitybuf ends up with the sum of the angle of incidence factors */ projecttoimgbuf(angle_of_incidence_factor,1.0,intersectpoint2uvcanon,intersectpoint2uvcanon_deriv_horiz,intersectpoint2uvcanon_deriv_vert,surfacearray[surfaceid].angle_of_incidence_factor_buf_uv,NULL,surfacearray[surfaceid].validitybuf,0,surfacearray[surfaceid].nx,surfacearray[surfaceid].ny,MIN_RADIUS_UV_PIXELS,MIN_RADIUS_SRC_PIXELS,BANDWIDTH_FRACTION); } } } else { /* No intersection found at all */ //imagedata_horiz_ray_intersect_shift_deriv_cam_z[src_nx*ycnt + xcnt]=NaNval; //imagedata_vert_ray_intersect_shift_deriv_cam_z[src_nx*ycnt + xcnt]=NaNval; if (imagedata_angleofincidence_weighting) { imagedata_angleofincidence_weighting[src_nx*ycnt + xcnt] = 0.0; } if (imagedata_angleofincidence) { imagedata_angleofincidence[src_nx*ycnt + xcnt]=NaNval; } if (imagedata_uvcoords) { imagedata_uvcoords[src_nx*ycnt*2 + xcnt*2]=NaNval; imagedata_uvcoords[src_nx*ycnt*2 + xcnt*2 + 1]=NaNval; } } } } } } static IPOPS_INLINE void project_orthographic(struct projectionsurface *surfacearray, float32_t *framedata, size_t framecnt, size_t src_nx, size_t src_ny, float64_t *orthographic_proj_matrix, float32_t *imagedata_zbuffer, uint32_t *imagedata_surfaceid, uint32_t *imagedata_facetid, float32_t *imagedata_weighting) { int64_t ycnt; // must be signed (e.g. 
 not size_t) for MSVC compatibility

  /* For each source pixel with a finite z-buffer entry (i.e. whose
     orthographic ray hit a surface in a previous z-buffer pass), recompute
     the ray and its per-pixel shift derivatives, map the pixel into the hit
     surface's (u,v) parameterization, and splat the frame data into that
     surface's accumulation buffers. */
#pragma omp parallel default(shared) private(ycnt) //,src_ny)
  {
#pragma omp for
    for (ycnt=0;ycnt < src_ny;ycnt++) {
      size_t xcnt;

      for (xcnt=0; xcnt < src_nx; xcnt++) {
	if (!IPOPS_ISINF(imagedata_zbuffer[ycnt*src_nx + xcnt])) {
	  int32_t surfaceid,facetid;
	  float64_t rayvec[4],rayvecobj[4]; // rayvec in object coordinates
	  float64_t raysrcloc[4],raysrclocobj[4];
	  float64_t horiz_raysrc_shift[3];
	  float64_t vert_raysrc_shift[3];
	  float64_t dist;
	  float64_t horiz_ray_intersect_shift_deriv[3];
	  float64_t vert_ray_intersect_shift_deriv[3];
	  float64_t intersectpoint2uvcanon[3]; // intersection point in texture coordinates (not scaled UV coordinates
	  float64_t intersectpoint2uvcanon_deriv_horiz[3];
	  float64_t intersectpoint2uvcanon_deriv_vert[3];
	  //float64_t angle_of_incidence;

	  /* finite z-buffer means this point maps onto the object */
	  surfaceid=imagedata_surfaceid[ycnt*src_nx + xcnt];
	  facetid=imagedata_facetid[ycnt*src_nx + xcnt];

	  // rayvec in camera coordinates: orthographic rays all point in +z
	  rayvec[0]=0;
	  rayvec[1]=0;
	  rayvec[2]=1.0;
	  rayvec[3]=0.0; // "vector" has 0 w-component

	  /* apply correction for camera matrix being in OpenCV coords, but our
	     transforms being in OpenGL coords */
	  /* We negate rayvec[1] and rayvec[2] rather than messing with tosurfaceframe. */
	  rayvec[1]=-rayvec[1];
	  rayvec[2]=-rayvec[2];

	  // convert to object coordinates
	  multmatvec4(surfacearray[surfaceid].tosurfaceframe,rayvec,rayvecobj);
	  // no need to normalize since a unit vector to begin with (rayvecfactor=1)

	  // calculation of raysrclocobj: Coordinates of ray source in object coordinates:
	  // since we are doing this OpenCV style, negate the 2nd and 3rd columns... see imageprojectionmodel.py
	  // (except we don't actually do this (???))

	  // ray source location in camera coords
	  raysrcloc[0]=(xcnt - orthographic_proj_matrix[2])/orthographic_proj_matrix[0]; // (u-cu)/fx
	  raysrcloc[1]=-(ycnt - orthographic_proj_matrix[5])/orthographic_proj_matrix[4]; // (v-cv)/fy (negate because tosurfaceframe is in opengl coords)
	  raysrcloc[2]=-0.0; // negate because tosurfaceframe is in OpenGL coords
	  raysrcloc[3]=1.0; /* this is a point */
	  multmatvec4(surfacearray[surfaceid].tosurfaceframe,raysrcloc,raysrclocobj);
	  normalize_wcoord4(raysrclocobj);

	  // horiz_ray_intersect_shift_deriv it the change of the intersection coordinates per one-pixel
	  // horizontal shift
	  // The ray itself stays in the same direction but shifts laterally in camera coordinates by [ 1/fx, 0, 0 ]
	  // Evaluate derivatives... We convert them to object coordinates, evaluate the shifts
	  // in object coordinates, then shift back.
	  // (it might be marginally more efficient to evaluate the derivatives in camera coordinates)

	  //horiz_raysrc_shift = surfacearray[surfaceid].tosurfaceframe * [ 1/fx ; 0 ; 0 ; 0]
	  // so extract 1st column of tosurfaceframe and divide by fx
	  horiz_raysrc_shift[0]=surfacearray[surfaceid].tosurfaceframe[0*4 + 0]/orthographic_proj_matrix[0]; // /fx
	  horiz_raysrc_shift[1]=surfacearray[surfaceid].tosurfaceframe[1*4 + 0]/orthographic_proj_matrix[0]; // /fx
	  horiz_raysrc_shift[2]=surfacearray[surfaceid].tosurfaceframe[2*4 + 0]/orthographic_proj_matrix[0]; // /fx

	  // vertical shift
	  // The ray itself stays in the same direction but shifts laterally in camera coordinates by [ 0, 1/fy, 0 ]
	  // because tosurfaceframe in opengl coordinates negate 2nd and 3rd column (this is 2nd)
	  vert_raysrc_shift[0]=-surfacearray[surfaceid].tosurfaceframe[0*4 + 1]/orthographic_proj_matrix[4]; // /fy
	  vert_raysrc_shift[1]=-surfacearray[surfaceid].tosurfaceframe[1*4 + 1]/orthographic_proj_matrix[4]; // /fy
	  vert_raysrc_shift[2]=-surfacearray[surfaceid].tosurfaceframe[2*4 + 1]/orthographic_proj_matrix[4]; // /fy

	  // Then we go back through each pixel and
	  // perform the mapping, stamping down sinc-interpolation
	  // (or similar lowpass interpolation)
	  // patches onto the parameterization output image.
	  // NOTE: These calculations are identical
	  // to the pre-calculations used to do
	  // z-buffering in evaluate_zbuffer, above!

	  // Same for derivatives...
	  orthographic_deriv_raycalcs(surfacearray, raysrclocobj, rayvecobj, surfaceid, facetid, horiz_raysrc_shift, vert_raysrc_shift,
				      /* outputs */
				      horiz_ray_intersect_shift_deriv,
				      vert_ray_intersect_shift_deriv);

	  // So horiz_ray_intersect_shift_deriv
	  // and vert_ray_intersect_shift_deriv
	  // are vectors indicating what happens when you move
	  // one pixel to the right and one pixel down
	  // on the image being projected
	  //
	  // Now we have to figure out where this data
	  // actually goes... i.e. map onto texture coordinates
	  //
	  //
	  // Find intersection point, as in evaluate_zbuffer(), above
	  // firstidx=surfacearray[surfaceid].vertexidx_indices[facetid];
	  //
	  //dist = ray_to_plane_distance(focalpointobj,rayvecobj,&surfacearray[surfaceid].vertices[3*surfacearray[surfaceid].vertexidx[firstidx + 0]], &surfacearray[surfaceid].facetnormals[facetid*3]);
	  //assert(dist==imagedata_zbuffer[src_nx*ycnt + xcnt]); // Should get exactly same result as previous calculation
	  //angle_of_incidence = acos(fabs(dotvecvec3(rayvecobj,&surfacearray[surfaceid].facetnormals[facetid*3]))); // rayvecobj and facetnormals should be unit vectors

	  // reuse the distance already computed by the z-buffer pass
	  dist=imagedata_zbuffer[src_nx*ycnt + xcnt];

	  // be sure to allocate 3 numbers of space
	  // for intersectpoint2uvcanon & friends as we will use it
	  // in projective form

	  // find intersectpoint
	  find_intersect_uv(surfacearray, raysrclocobj, rayvecobj, surfaceid,facetid, dist,
			    horiz_ray_intersect_shift_deriv, // only needed if calculating derivatives
			    vert_ray_intersect_shift_deriv, // only needed if calculating derivatives
			    /* outputs */
			    intersectpoint2uvcanon, // intersection point in texture coordinates (not scaled UV coordinates
			    intersectpoint2uvcanon_deriv_horiz, // also flag for whether to calc derivatives
			    intersectpoint2uvcanon_deriv_vert);

	  // Now we have uv parameterization coordinates
	  // intersectpoint2uvcanon as well as derivatives
	  // of those coordinates representing motion of
	  // one pixel in the projected image to the right (horiz)
	  // and one pixel down (vert).

	  // !!!*** Evaluate intrinsic texture coords -> canonical
	  // texture coords transform and derivatives, here
	  // once implemented ***!!!

	  // Evaluate within 1.5px by bandlimited splat
	  projecttoimgbuf(framedata[src_nx*ycnt+xcnt],imagedata_weighting[src_nx*ycnt + xcnt],intersectpoint2uvcanon,intersectpoint2uvcanon_deriv_horiz,intersectpoint2uvcanon_deriv_vert,surfacearray[surfaceid].imgbuf,surfacearray[surfaceid].weightingbuf,surfacearray[surfaceid].validitybuf,framecnt,surfacearray[surfaceid].nx,surfacearray[surfaceid].ny,MIN_RADIUS_UV_PIXELS,MIN_RADIUS_SRC_PIXELS,BANDWIDTH_FRACTION);

	  // Projecttoimgbuf
	  // accumulates not just the patches, but a validity
	  // factor representing how much input there was in
	  // each destination pixel. The accumulated image
	  // can then be normalized by the validity factor
	  // to get a meaningful estimate (and pixels with
	  // validity factor << 1 should be set to NaN)
	}
      }
    }
    //#pragma omp barrier

    /* Now we can use the validity map to normalize the generated image */
    /* Actually, no we can't because we might have several different
       image sources being collected together, and we only want to
       normalize at the very end. So it wants to be a different function */
    /*
    #pragma omp for
    for (ycnt=0;ycnt < src_ny;ycnt++) {
      size_t xcnt;
      for (xcnt=0; xcnt < src_nx; xcnt++) {

      }
    }*/
  }
}
wino_conv_kernel_x86.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2021, OPEN AI LAB * Author: haoluo@openailab.com */ #include "wino_conv_kernel_x86.h" #include "graph/tensor.h" #include "graph/node.h" #include "graph/graph.h" #include "utility/sys_port.h" #include "utility/float.h" #include "utility/log.h" #include "device/cpu/cpu_node.h" #include "device/cpu/cpu_graph.h" #include "device/cpu/cpu_module.h" #include <stdint.h> #include <stdlib.h> #include <string.h> #include <math.h> #define TILE 4 #define ELEM_SIZE ((TILE + 2) * (TILE + 2)) #define WINO_MAX(a, b) ((a) > (b) ? (a) : (b)) #define WINO_MIN(a, b) ((a) < (b) ? 
 (a) : (b))

// In-place ReLU over 'size' floats; if activation > 0 additionally clip to
// [0, activation] (i.e. ReLU-N, e.g. activation==6 gives ReLU6).
static void relu(float* data, int size, int activation)
{
    for (int i = 0; i < size; i++)
    {
        data[i] = WINO_MAX(data[i], ( float )0);

        if (activation > 0)
        {
            data[i] = WINO_MIN(data[i], ( float )activation);
        }
    }
}

// Size (in bytes) of the scratch buffer needed to hold the Winograd-
// transformed kernel: one ELEM_SIZE (6x6) tile of floats per
// (output channel, input channel) pair, plus 128 bytes of slack.
// NOTE(review): the product is computed in unsigned long but stored into an
// int, which could overflow for very large channel counts — confirm upstream
// layer-size limits.
static int get_private_mem_size(struct tensor* filter, struct conv_param* param)
{
    int output_c = filter->dims[0];
    int input_c = filter->dims[1];
    int trans_ker_size = (unsigned long)output_c * input_c * ELEM_SIZE * sizeof(float);
    return trans_ker_size + 128; // caution
}

// Copy an m x n 2D float array into an m_align x n_align destination,
// offset by (pad_h, pad_w); destination border cells are left untouched
// (caller is expected to have zeroed dst — TODO confirm).
// Fast path: if the source already covers the aligned size, do one memcpy.
static void pad_0_align_2D(float* dst, float* src, int m, int n, int m_align, int n_align, int pad_h, int pad_w)
{
    int i;

    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, (unsigned long)m * n * sizeof(float));
        return;
    }
    for (i = 0; i < m; ++i)
    {
        memcpy(dst + (i + pad_h) * n_align + pad_w, src + i * n, n * sizeof(float));
    }
}

// pad 0 in right and down side on 3D
// Channel-by-channel application of pad_0_align_2D over c planes.
static void pad_0_align_3D(float* dst, float* src, int m, int n, int m_align, int n_align, int c, int pad_h, int pad_w)
{
    int i;

    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, (unsigned long)c * m * n * sizeof(float));
        return;
    }
    for (i = 0; i < c; ++i)
    {
        pad_0_align_2D(dst + i * m_align * n_align, src + i * m * n, m, n, m_align, n_align, pad_h, pad_w);
    }
}

// Inverse of pad_0_align_2D: extract the m x n interior (offset by
// pad_h, pad_w) out of an m_align x n_align padded source plane.
static void delete_0_2D(float* dst, float* src, int m_align, int n_align, int m, int n, int pad_h, int pad_w)
{
    int i;

    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, (unsigned long)m * n * sizeof(float));
        return;
    }
    for (i = 0; i < m; ++i)
    {
        memcpy(dst + i * n, src + (i + pad_h) * n_align + pad_w, n * sizeof(float));
    }
}

// pad 0 in right and down side on 3D
// Channel-by-channel application of delete_0_2D over c planes.
static void delete_0_3D(float* dst, float* src, int m_align, int n_align, int m, int n, int c, int pad_h, int pad_w)
{
    int i;

    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, (unsigned long)c * m * n * sizeof(float));
        return;
    }
    for (i = 0; i < c; ++i)
    {
        delete_0_2D(dst + i * m * n, src + i * m_align * n_align, m_align, n_align, m, n, pad_h, pad_w);
    }
}

// Winograd F(4,3) 3x3 stride-1 convolution (continues below)
void conv3x3s1_winograd43_sse(float *
bottom_blob, float* top_blob, float* kernel_tm_test, float* dot_block, float* transform_input, float* output_bordered, float* _bias, int w, int h, int inch, int outw, int outh, int outch, int num_thread)
{
    // NOTE(review): elemsize is computed but unused below.
    size_t elemsize = sizeof(float);
    const float* bias = _bias;

    // pad to 4n+2, winograd F(4,3)
    float* bottom_blob_bordered = bottom_blob;
    int outw_align = (outw + 3) / 4 * 4;
    int outh_align = (outh + 3) / 4 * 4;

    // w/h are recomputed from the aligned output size; the caller is assumed
    // to have padded the input to exactly these dimensions (TODO confirm).
    w = outw_align + 2;
    h = outh_align + 2;

    // BEGIN transform input
    float* bottom_blob_tm = NULL;
    {
        int w_tm = outw_align / 4 * 6;
        int h_tm = outh_align / 4 * 6;

        int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 6;

        const int tiles = nColBlocks * nRowBlocks;
        // Stride (in floats) between consecutive 4-float groups of the
        // transformed layout: one group of 4 values per input channel per tile.
        const int tiles_n = 4 * inch * tiles;

        bottom_blob_tm = transform_input;

        // BT
        // const float itm[4][4] = {
        //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
        //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
        //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
        //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
        // };

        // 0 = 4 * r00 - 5 * r02 + r04
        // 1 = -4 * (r01 + r02) + r03 + r04
        // 2 = 4 * (r01 - r02) - r03 + r04
        // 3 = -2 * r01 - r02 + 2 * r03 + r04
        // 4 = 2 * r01 - r02 - 2 * r03 + r04
        // 5 = 4 * r01 - 5 * r03 + r05

        // 0 = 4 * r00 - 5 * r02 + r04
        // 1 = -4 * (r01 + r02) + r03 + r04
        // 2 = 4 * (r01 - r02) - r03 + r04
        // 3 = -2 * r01 - r02 + 2 * r03 + r04
        // 4 = 2 * r01 - r02 - 2 * r03 + r04
        // 5 = 4 * r01 - 5 * r03 + r05

#if __AVX__
        // Constants of the B^T matrix, broadcast across an AVX register.
        __m256 _1_n = _mm256_set1_ps(-1);
        __m256 _2_p = _mm256_set1_ps(2);
        __m256 _2_n = _mm256_set1_ps(-2);
        __m256 _4_p = _mm256_set1_ps(4);
        __m256 _4_n = _mm256_set1_ps(-4);
        __m256 _5_n = _mm256_set1_ps(-5);
#endif

#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < inch; q++)
        {
            const float* img = bottom_blob_bordered + q * w * h;

            for (int j = 0; j < nColBlocks; j++)
            {
                // Six consecutive input rows feeding one row of 6x6 tiles
                // (tiles overlap by 2 rows/cols, hence the stride of 4).
                const float* r0 = img + w * j * 4;
                const float* r1 = r0 + w;
                const float* r2 = r1 + w;
                const float* r3 = r2 + w;
                const float* r4 = r3 + w;
                const float* r5 = r4 + w;

                for (int i = 0; i < nRowBlocks; i++)
                {
                    // The 36 transformed values of this tile are scattered as
                    // nine 4-float groups, tiles_n floats apart.
                    float* out_tm0 = bottom_blob_tm + 4 * inch * (j * nRowBlocks + i) + 4 * q;
                    float* out_tm1 = out_tm0 + tiles_n;
                    float* out_tm2 = out_tm0 + 2 * tiles_n;
                    float* out_tm3 = out_tm0 + 3 * tiles_n;
                    float* out_tm4 = out_tm0 + 4 * tiles_n;
                    float* out_tm5 = out_tm0 + 5 * tiles_n;
                    float* out_tm6 = out_tm0 + 6 * tiles_n;
                    float* out_tm7 = out_tm0 + 7 * tiles_n;
                    float* out_tm8 = out_tm0 + 8 * tiles_n;
#if __AVX__
                    __m256 _d0, _d1, _d2, _d3, _d4, _d5;
                    __m256 _w0, _w1, _w2, _w3, _w4, _w5;
                    __m256 _t0, _t1, _t2, _t3, _t4, _t5;
                    __m256 _n0, _n1, _n2, _n3, _n4, _n5;

                    // load
                    _d0 = _mm256_loadu_ps(r0);
                    _d1 = _mm256_loadu_ps(r1);
                    _d2 = _mm256_loadu_ps(r2);
                    _d3 = _mm256_loadu_ps(r3);
                    _d4 = _mm256_loadu_ps(r4);
                    _d5 = _mm256_loadu_ps(r5);

                    // w = B_t * d
                    _w0 = _mm256_mul_ps(_d0, _4_p);
                    _w0 = _mm256_fmadd_ps(_d2, _5_n, _w0);
                    _w0 = _mm256_add_ps(_w0, _d4);
                    _w1 = _mm256_mul_ps(_d1, _4_n);
                    _w1 = _mm256_fmadd_ps(_d2, _4_n, _w1);
                    _w1 = _mm256_add_ps(_w1, _d3);
                    _w1 = _mm256_add_ps(_w1, _d4);
                    _w2 = _mm256_mul_ps(_d1, _4_p);
                    _w2 = _mm256_fmadd_ps(_d2, _4_n, _w2);
                    _w2 = _mm256_fmadd_ps(_d3, _1_n, _w2);
                    _w2 = _mm256_add_ps(_w2, _d4);
                    _w3 = _mm256_mul_ps(_d1, _2_n);
                    _w3 = _mm256_fmadd_ps(_d2, _1_n, _w3);
                    _w3 = _mm256_fmadd_ps(_d3, _2_p, _w3);
                    _w3 = _mm256_add_ps(_w3, _d4);
                    _w4 = _mm256_mul_ps(_d1, _2_p);
                    _w4 = _mm256_fmadd_ps(_d2, _1_n, _w4);
                    _w4 = _mm256_fmadd_ps(_d3, _2_n, _w4);
                    _w4 = _mm256_add_ps(_w4, _d4);
                    _w5 = _mm256_mul_ps(_d1, _4_p);
                    _w5 = _mm256_fmadd_ps(_d3, _5_n, _w5);
                    _w5 = _mm256_add_ps(_w5, _d5);

                    // transpose d to d_t
                    // (element-wise transpose; MSVC exposes lanes via
                    // .m256_f32, gcc/clang allow direct [] indexing)
#ifdef _WIN32
                    {
                        _t0.m256_f32[0] = _w0.m256_f32[0];
                        _t1.m256_f32[0] = _w0.m256_f32[1];
                        _t2.m256_f32[0] = _w0.m256_f32[2];
                        _t3.m256_f32[0] = _w0.m256_f32[3];
                        _t4.m256_f32[0] = _w0.m256_f32[4];
                        _t5.m256_f32[0] = _w0.m256_f32[5];
                        _t0.m256_f32[1] = _w1.m256_f32[0];
                        _t1.m256_f32[1] = _w1.m256_f32[1];
                        _t2.m256_f32[1] = _w1.m256_f32[2];
                        _t3.m256_f32[1] = _w1.m256_f32[3];
                        _t4.m256_f32[1] = _w1.m256_f32[4];
                        _t5.m256_f32[1] = _w1.m256_f32[5];
                        _t0.m256_f32[2] = _w2.m256_f32[0];
                        _t1.m256_f32[2] = _w2.m256_f32[1];
                        _t2.m256_f32[2] = _w2.m256_f32[2];
                        _t3.m256_f32[2] = _w2.m256_f32[3];
                        _t4.m256_f32[2] = _w2.m256_f32[4];
                        _t5.m256_f32[2] = _w2.m256_f32[5];
                        _t0.m256_f32[3] = _w3.m256_f32[0];
                        _t1.m256_f32[3] = _w3.m256_f32[1];
                        _t2.m256_f32[3] = _w3.m256_f32[2];
                        _t3.m256_f32[3] = _w3.m256_f32[3];
                        _t4.m256_f32[3] = _w3.m256_f32[4];
                        _t5.m256_f32[3] = _w3.m256_f32[5];
                        _t0.m256_f32[4] = _w4.m256_f32[0];
                        _t1.m256_f32[4] = _w4.m256_f32[1];
                        _t2.m256_f32[4] = _w4.m256_f32[2];
                        _t3.m256_f32[4] = _w4.m256_f32[3];
                        _t4.m256_f32[4] = _w4.m256_f32[4];
                        _t5.m256_f32[4] = _w4.m256_f32[5];
                        _t0.m256_f32[5] = _w5.m256_f32[0];
                        _t1.m256_f32[5] = _w5.m256_f32[1];
                        _t2.m256_f32[5] = _w5.m256_f32[2];
                        _t3.m256_f32[5] = _w5.m256_f32[3];
                        _t4.m256_f32[5] = _w5.m256_f32[4];
                        _t5.m256_f32[5] = _w5.m256_f32[5];
                    }
#else
                    {
                        _t0[0] = _w0[0];
                        _t1[0] = _w0[1];
                        _t2[0] = _w0[2];
                        _t3[0] = _w0[3];
                        _t4[0] = _w0[4];
                        _t5[0] = _w0[5];
                        _t0[1] = _w1[0];
                        _t1[1] = _w1[1];
                        _t2[1] = _w1[2];
                        _t3[1] = _w1[3];
                        _t4[1] = _w1[4];
                        _t5[1] = _w1[5];
                        _t0[2] = _w2[0];
                        _t1[2] = _w2[1];
                        _t2[2] = _w2[2];
                        _t3[2] = _w2[3];
                        _t4[2] = _w2[4];
                        _t5[2] = _w2[5];
                        _t0[3] = _w3[0];
                        _t1[3] = _w3[1];
                        _t2[3] = _w3[2];
                        _t3[3] = _w3[3];
                        _t4[3] = _w3[4];
                        _t5[3] = _w3[5];
                        _t0[4] = _w4[0];
                        _t1[4] = _w4[1];
                        _t2[4] = _w4[2];
                        _t3[4] = _w4[3];
                        _t4[4] = _w4[4];
                        _t5[4] = _w4[5];
                        _t0[5] = _w5[0];
                        _t1[5] = _w5[1];
                        _t2[5] = _w5[2];
                        _t3[5] = _w5[3];
                        _t4[5] = _w5[4];
                        _t5[5] = _w5[5];
                    }
#endif

                    // d = B_t * d_t
                    // (second application of B^T, completing B^T * d * B)
                    _n0 = _mm256_mul_ps(_t0, _4_p);
                    _n0 = _mm256_fmadd_ps(_t2, _5_n, _n0);
                    _n0 = _mm256_add_ps(_n0, _t4);
                    _n1 = _mm256_mul_ps(_t1, _4_n);
                    _n1 = _mm256_fmadd_ps(_t2, _4_n, _n1);
                    _n1 = _mm256_add_ps(_n1, _t3);
                    _n1 = _mm256_add_ps(_n1, _t4);
                    _n2 = _mm256_mul_ps(_t1, _4_p);
                    _n2 = _mm256_fmadd_ps(_t2, _4_n, _n2);
                    _n2 = _mm256_fmadd_ps(_t3, _1_n, _n2);
                    _n2 = _mm256_add_ps(_n2, _t4);
                    _n3 = _mm256_mul_ps(_t1, _2_n);
                    _n3 = _mm256_fmadd_ps(_t2, _1_n, _n3);
                    _n3 = _mm256_fmadd_ps(_t3, _2_p, _n3);
                    _n3 = _mm256_add_ps(_n3, _t4);
                    _n4 = _mm256_mul_ps(_t1, _2_p);
                    _n4 = _mm256_fmadd_ps(_t2, _1_n, _n4);
                    _n4 = _mm256_fmadd_ps(_t3, _2_n, _n4);
                    _n4 = _mm256_add_ps(_n4, _t4);
                    _n5 = _mm256_mul_ps(_t1, _4_p);
                    _n5 = _mm256_fmadd_ps(_t3, _5_n, _n5);
                    _n5 = _mm256_add_ps(_n5, _t5);

                    // save to out_tm
                    // (spill registers to scalar arrays, then scatter the 36
                    // valid values into nine groups of four)
                    float output_n0[8] = {0.f};
                    _mm256_storeu_ps(output_n0, _n0);
                    float output_n1[8] = {0.f};
                    _mm256_storeu_ps(output_n1, _n1);
                    float output_n2[8] = {0.f};
                    _mm256_storeu_ps(output_n2, _n2);
                    float output_n3[8] = {0.f};
                    _mm256_storeu_ps(output_n3, _n3);
                    float output_n4[8] = {0.f};
                    _mm256_storeu_ps(output_n4, _n4);
                    float output_n5[8] = {0.f};
                    _mm256_storeu_ps(output_n5, _n5);

                    out_tm0[0] = output_n0[0];
                    out_tm0[1] = output_n0[1];
                    out_tm0[2] = output_n0[2];
                    out_tm0[3] = output_n0[3];
                    out_tm1[0] = output_n0[4];
                    out_tm1[1] = output_n0[5];
                    out_tm1[2] = output_n1[0];
                    out_tm1[3] = output_n1[1];
                    out_tm2[0] = output_n1[2];
                    out_tm2[1] = output_n1[3];
                    out_tm2[2] = output_n1[4];
                    out_tm2[3] = output_n1[5];
                    out_tm3[0] = output_n2[0];
                    out_tm3[1] = output_n2[1];
                    out_tm3[2] = output_n2[2];
                    out_tm3[3] = output_n2[3];
                    out_tm4[0] = output_n2[4];
                    out_tm4[1] = output_n2[5];
                    out_tm4[2] = output_n3[0];
                    out_tm4[3] = output_n3[1];
                    out_tm5[0] = output_n3[2];
                    out_tm5[1] = output_n3[3];
                    out_tm5[2] = output_n3[4];
                    out_tm5[3] = output_n3[5];
                    out_tm6[0] = output_n4[0];
                    out_tm6[1] = output_n4[1];
                    out_tm6[2] = output_n4[2];
                    out_tm6[3] = output_n4[3];
                    out_tm7[0] = output_n4[4];
                    out_tm7[1] = output_n4[5];
                    out_tm7[2] = output_n5[0];
                    out_tm7[3] = output_n5[1];
                    out_tm8[0] = output_n5[2];
                    out_tm8[1] = output_n5[3];
                    out_tm8[2] = output_n5[4];
                    out_tm8[3] = output_n5[5];
#else
                    // Scalar fallback: identical math on 6x6 tiles.
                    float d0[6], d1[6], d2[6], d3[6], d4[6], d5[6];
                    float w0[6], w1[6], w2[6], w3[6], w4[6], w5[6];
                    float t0[6], t1[6], t2[6], t3[6], t4[6], t5[6];

                    // load
                    for (int n = 0; n < 6; n++)
                    {
                        d0[n] = r0[n];
                        d1[n] = r1[n];
                        d2[n] = r2[n];
                        d3[n] = r3[n];
                        d4[n] = r4[n];
                        d5[n] = r5[n];
                    }

                    // w = B_t * d
                    for (int n = 0; n < 6; n++)
                    {
                        w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n];
                        w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n];
                        w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n];
                        w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n];
                        w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n];
                        w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n];
                    }

                    // transpose d to d_t
                    {
                        t0[0] = w0[0];
                        t1[0] = w0[1];
                        t2[0] = w0[2];
                        t3[0] = w0[3];
                        t4[0] = w0[4];
                        t5[0] = w0[5];
                        t0[1] = w1[0];
                        t1[1] = w1[1];
                        t2[1] = w1[2];
                        t3[1] = w1[3];
                        t4[1] = w1[4];
                        t5[1] = w1[5];
                        t0[2] = w2[0];
                        t1[2] = w2[1];
                        t2[2] = w2[2];
                        t3[2] = w2[3];
                        t4[2] = w2[4];
                        t5[2] = w2[5];
                        t0[3] = w3[0];
                        t1[3] = w3[1];
                        t2[3] = w3[2];
                        t3[3] = w3[3];
                        t4[3] = w3[4];
                        t5[3] = w3[5];
                        t0[4] = w4[0];
                        t1[4] = w4[1];
                        t2[4] = w4[2];
                        t3[4] = w4[3];
                        t4[4] = w4[4];
                        t5[4] = w4[5];
                        t0[5] = w5[0];
                        t1[5] = w5[1];
                        t2[5] = w5[2];
                        t3[5] = w5[3];
                        t4[5] = w5[4];
                        t5[5] = w5[5];
                    }

                    // d = B_t * d_t
                    for (int n = 0; n < 6; n++)
                    {
                        d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n];
                        d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n];
                        d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n];
                        d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n];
                        d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n];
                        d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n];
                    }

                    // save to out_tm
                    {
                        out_tm0[0] = d0[0];
                        out_tm0[1] = d0[1];
                        out_tm0[2] = d0[2];
                        out_tm0[3] = d0[3];
                        out_tm1[0] = d0[4];
                        out_tm1[1] = d0[5];
                        out_tm1[2] = d1[0];
                        out_tm1[3] = d1[1];
                        out_tm2[0] = d1[2];
                        out_tm2[1] = d1[3];
                        out_tm2[2] = d1[4];
                        out_tm2[3] = d1[5];
                        out_tm3[0] = d2[0];
                        out_tm3[1] = d2[1];
                        out_tm3[2] = d2[2];
                        out_tm3[3] = d2[3];
                        out_tm4[0] = d2[4];
                        out_tm4[1] = d2[5];
                        out_tm4[2] = d3[0];
                        out_tm4[3] = d3[1];
                        out_tm5[0] = d3[2];
                        out_tm5[1] = d3[3];
                        out_tm5[2] = d3[4];
                        out_tm5[3] = d3[5];
                        out_tm6[0] = d4[0];
                        out_tm6[1] = d4[1];
                        out_tm6[2] = d4[2];
                        out_tm6[3] = d4[3];
                        out_tm7[0] = d4[4];
                        out_tm7[1] = d4[5];
                        out_tm7[2] = d5[0];
                        out_tm7[3] = d5[1];
                        out_tm8[0] = d5[2];
                        out_tm8[1] = d5[3];
                        out_tm8[2] = d5[4];
                        out_tm8[3] = d5[5];
                    }
#endif // __AVX__
                    // Advance to the next tile (stride 4, tiles overlap by 2).
                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
                    r4 += 4;
                    r5 += 4;
                }
            }
        }
    }

    // BEGIN dot
    float*
top_blob_tm = NULL;
    {
        int w_tm = outw_align / 4 * 6;
        int h_tm = outh_align / 4 * 6;

        int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 6;

        const int tiles = nColBlocks * nRowBlocks;
        // Per-output-channel stride in the transformed-output buffer
        // (36 values per tile).
        const int tiles_n = 36 * tiles;

        top_blob_tm = dot_block;

        // Outer parallel loop over the 9 groups of 4 transformed positions
        // (9 * 4 == 36). Output channels are processed 8, then 4, then 1 at
        // a time to maximize register reuse of the interleaved kernel.
#pragma omp parallel for num_threads(num_thread)
        for (int r = 0; r < 9; r++)
        {
            int nn_outch = 0;
            int remain_outch_start = 0;

            nn_outch = outch >> 3;
            remain_outch_start = nn_outch << 3;

            for (int pp = 0; pp < nn_outch; pp++)
            {
                int p = pp << 3;

                float* output0_tm = top_blob_tm + tiles_n * p;
                float* output1_tm = top_blob_tm + tiles_n * (p + 1);
                float* output2_tm = top_blob_tm + tiles_n * (p + 2);
                float* output3_tm = top_blob_tm + tiles_n * (p + 3);
                float* output4_tm = top_blob_tm + tiles_n * (p + 4);
                float* output5_tm = top_blob_tm + tiles_n * (p + 5);
                float* output6_tm = top_blob_tm + tiles_n * (p + 6);
                float* output7_tm = top_blob_tm + tiles_n * (p + 7);

                output0_tm = output0_tm + r * 4;
                output1_tm = output1_tm + r * 4;
                output2_tm = output2_tm + r * 4;
                output3_tm = output3_tm + r * 4;
                output4_tm = output4_tm + r * 4;
                output5_tm = output5_tm + r * 4;
                output6_tm = output6_tm + r * 4;
                output7_tm = output7_tm + r * 4;

                for (int i = 0; i < tiles; i++)
                {
                    // kptr: interleaved kernel block for these 8 output
                    // channels at position group r (layout produced by
                    // conv3x3s1_winograd43_transform_kernel_sse).
                    const float* kptr = kernel_tm_test + 4 * r * inch * outch + p / 8 * inch * 32;
                    const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
                    float zero_val = 0.f;
                    __m128 _sum0 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum1 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum2 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum3 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum4 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum5 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum6 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum7 = _mm_broadcast_ss(&zero_val);
#else
                    __m128 _sum0 = _mm_set1_ps(0.f);
                    __m128 _sum1 = _mm_set1_ps(0.f);
                    __m128 _sum2 = _mm_set1_ps(0.f);
                    __m128 _sum3 = _mm_set1_ps(0.f);
                    __m128 _sum4 = _mm_set1_ps(0.f);
                    __m128 _sum5 = _mm_set1_ps(0.f);
                    __m128 _sum6 = _mm_set1_ps(0.f);
                    __m128 _sum7 = _mm_set1_ps(0.f);
#endif
                    // Accumulate over input channels, 4 at a time.
                    int q = 0;
                    for (; q + 3 < inch; q = q + 4)
                    {
                        __m128 _r0 = _mm_loadu_ps(r0);
                        __m128 _r1 = _mm_loadu_ps(r0 + 4);
                        __m128 _r2 = _mm_loadu_ps(r0 + 8);
                        __m128 _r3 = _mm_loadu_ps(r0 + 12);

                        __m128 _k0 = _mm_loadu_ps(kptr);
                        __m128 _k1 = _mm_loadu_ps(kptr + 4);
                        __m128 _k2 = _mm_loadu_ps(kptr + 8);
                        __m128 _k3 = _mm_loadu_ps(kptr + 12);
                        __m128 _k4 = _mm_loadu_ps(kptr + 16);
                        __m128 _k5 = _mm_loadu_ps(kptr + 20);
                        __m128 _k6 = _mm_loadu_ps(kptr + 24);
                        __m128 _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r0, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r0, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r0, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r0, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
                        kptr += 32;
                        _k0 = _mm_loadu_ps(kptr);
                        _k1 = _mm_loadu_ps(kptr + 4);
                        _k2 = _mm_loadu_ps(kptr + 8);
                        _k3 = _mm_loadu_ps(kptr + 12);
                        _k4 = _mm_loadu_ps(kptr + 16);
                        _k5 = _mm_loadu_ps(kptr + 20);
                        _k6 = _mm_loadu_ps(kptr + 24);
                        _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r1, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r1, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r1, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r1, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r1, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r1, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r1, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r1, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r1, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r1, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r1, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r1, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r1, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r1, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r1, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r1, _k7));
#endif
                        kptr += 32;
                        _k0 = _mm_loadu_ps(kptr);
                        _k1 = _mm_loadu_ps(kptr + 4);
                        _k2 = _mm_loadu_ps(kptr + 8);
                        _k3 = _mm_loadu_ps(kptr + 12);
                        _k4 = _mm_loadu_ps(kptr + 16);
                        _k5 = _mm_loadu_ps(kptr + 20);
                        _k6 = _mm_loadu_ps(kptr + 24);
                        _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r2, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r2, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r2, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r2, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r2, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r2, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r2, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r2, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r2, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r2, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r2, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r2, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r2, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r2, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r2, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r2, _k7));
#endif
                        kptr += 32;
                        _k0 = _mm_loadu_ps(kptr);
                        _k1 = _mm_loadu_ps(kptr + 4);
                        _k2 = _mm_loadu_ps(kptr + 8);
                        _k3 = _mm_loadu_ps(kptr + 12);
                        _k4 = _mm_loadu_ps(kptr + 16);
                        _k5 = _mm_loadu_ps(kptr + 20);
                        _k6 = _mm_loadu_ps(kptr + 24);
                        _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r3, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r3, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r3, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r3, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r3, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r3, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r3, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r3, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r3, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r3, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r3, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r3, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r3, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r3, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r3, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r3, _k7));
#endif
                        kptr += 32;
                        r0 += 16;
                    }

                    // Remainder input channels, one at a time.
                    for (; q < inch; q++)
                    {
                        __m128 _r0 = _mm_loadu_ps(r0);

                        __m128 _k0 = _mm_loadu_ps(kptr);
                        __m128 _k1 = _mm_loadu_ps(kptr + 4);
                        __m128 _k2 = _mm_loadu_ps(kptr + 8);
                        __m128 _k3 = _mm_loadu_ps(kptr + 12);
                        __m128 _k4 = _mm_loadu_ps(kptr + 16);
                        __m128 _k5 = _mm_loadu_ps(kptr + 20);
                        __m128 _k6 = _mm_loadu_ps(kptr + 24);
                        __m128 _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r0, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r0, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r0, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r0, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
                        kptr += 32;
                        r0 += 4;
                    }

                    _mm_storeu_ps(output0_tm, _sum0);
                    _mm_storeu_ps(output1_tm, _sum1);
                    _mm_storeu_ps(output2_tm, _sum2);
                    _mm_storeu_ps(output3_tm, _sum3);
                    _mm_storeu_ps(output4_tm, _sum4);
                    _mm_storeu_ps(output5_tm, _sum5);
                    _mm_storeu_ps(output6_tm, _sum6);
                    _mm_storeu_ps(output7_tm, _sum7);
#else
                    // Scalar fallback for the 8-channel block.
                    float sum0[4] = {0};
                    float sum1[4] = {0};
                    float sum2[4] = {0};
                    float sum3[4] = {0};
                    float sum4[4] = {0};
                    float sum5[4] = {0};
                    float sum6[4] = {0};
                    float sum7[4] = {0};

                    for (int q = 0; q < inch; q++)
                    {
                        for (int n = 0; n < 4; n++)
                        {
                            sum0[n] += r0[n] * kptr[n];
                            sum1[n] += r0[n] * kptr[n + 4];
                            sum2[n] += r0[n] * kptr[n + 8];
                            sum3[n] += r0[n] * kptr[n + 12];
                            sum4[n] += r0[n] * kptr[n + 16];
                            sum5[n] += r0[n] * kptr[n + 20];
                            sum6[n] += r0[n] * kptr[n + 24];
                            sum7[n] += r0[n] * kptr[n + 28];
                        }
                        kptr += 32;
                        r0 += 4;
                    }

                    for (int n = 0; n < 4; n++)
                    {
                        output0_tm[n] = sum0[n];
                        output1_tm[n] = sum1[n];
                        output2_tm[n] = sum2[n];
                        output3_tm[n] = sum3[n];
                        output4_tm[n] = sum4[n];
                        output5_tm[n] = sum5[n];
                        output6_tm[n] = sum6[n];
                        output7_tm[n] = sum7[n];
                    }
#endif // __AVX__
                    output0_tm += 36;
                    output1_tm += 36;
                    output2_tm += 36;
                    output3_tm += 36;
                    output4_tm += 36;
                    output5_tm += 36;
                    output6_tm += 36;
                    output7_tm += 36;
                }
            }

            // Next, output channels in groups of 4.
            nn_outch = (outch - remain_outch_start) >> 2;

            for (int pp = 0; pp < nn_outch; pp++)
            {
                int p = remain_outch_start + pp * 4;

                float* output0_tm = top_blob_tm + tiles_n * p;
                float* output1_tm = top_blob_tm + tiles_n * (p + 1);
                float* output2_tm = top_blob_tm + tiles_n * (p + 2);
                float* output3_tm = top_blob_tm + tiles_n * (p + 3);

                output0_tm = output0_tm + r * 4;
                output1_tm = output1_tm + r * 4;
                output2_tm = output2_tm + r * 4;
                output3_tm = output3_tm + r * 4;

                for (int i = 0; i < tiles; i++)
                {
                    const float* kptr = kernel_tm_test + 4 * r * inch * outch + (p / 8 + (p % 8) / 4) * inch * 16;
                    const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
                    float zero_val = 0.f;
                    __m128 _sum0 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum1 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum2 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum3 = _mm_broadcast_ss(&zero_val);
#else
                    __m128 _sum0 = _mm_set1_ps(0.f);
                    __m128 _sum1 = _mm_set1_ps(0.f);
                    __m128 _sum2 = _mm_set1_ps(0.f);
                    __m128 _sum3 = _mm_set1_ps(0.f);
#endif
                    for (int q = 0; q < inch; q++)
                    {
                        __m128 _r0 = _mm_loadu_ps(r0);

                        __m128 _k0 = _mm_loadu_ps(kptr);
                        __m128 _k1 = _mm_loadu_ps(kptr + 4);
                        __m128 _k2 = _mm_loadu_ps(kptr + 8);
                        __m128 _k3 = _mm_loadu_ps(kptr + 12);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
#endif
                        kptr += 16;
                        r0 += 4;
                    }

                    _mm_storeu_ps(output0_tm, _sum0);
                    _mm_storeu_ps(output1_tm, _sum1);
                    _mm_storeu_ps(output2_tm, _sum2);
                    _mm_storeu_ps(output3_tm, _sum3);
#else
                    // Scalar fallback for the 4-channel block.
                    float sum0[4] = {0};
                    float sum1[4] = {0};
                    float sum2[4] = {0};
                    float sum3[4] = {0};

                    for (int q = 0; q < inch; q++)
                    {
                        for (int n = 0; n < 4; n++)
                        {
                            sum0[n] += r0[n] * kptr[n];
                            sum1[n] += r0[n] * kptr[n + 4];
                            sum2[n] += r0[n] * kptr[n + 8];
                            sum3[n] += r0[n] * kptr[n + 12];
                        }
                        kptr += 16;
                        r0 += 4;
                    }

                    for (int n = 0; n < 4; n++)
                    {
                        output0_tm[n] = sum0[n];
                        output1_tm[n] = sum1[n];
                        output2_tm[n] = sum2[n];
                        output3_tm[n] = sum3[n];
                    }
#endif // __AVX__
                    output0_tm += 36;
                    output1_tm += 36;
                    output2_tm += 36;
                    output3_tm += 36;
                }
            }

            remain_outch_start += nn_outch << 2;

            // Finally, leftover output channels one at a time.
            for (int p = remain_outch_start; p < outch; p++)
            {
                float* output0_tm = top_blob_tm + 36 * tiles * p;

                output0_tm = output0_tm + r * 4;

                for (int i = 0; i < tiles; i++)
                {
                    const float* kptr = kernel_tm_test + 4 * r * inch * outch + (p / 8 + (p % 8) / 4 + p % 4) * inch * 4;
                    const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
                    float zero_val = 0.f;
                    __m128 _sum0 = _mm_broadcast_ss(&zero_val);
#else
                    __m128 _sum0 = _mm_set1_ps(0.f);
#endif
                    for (int q = 0; q < inch; q++)
                    {
                        __m128 _r0 = _mm_loadu_ps(r0);
                        __m128 _k0 = _mm_loadu_ps(kptr);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
#endif
                        kptr += 4;
                        r0 += 4;
                    }

                    _mm_storeu_ps(output0_tm, _sum0);
#else
                    float sum0[4] = {0};

                    for (int q = 0; q < inch; q++)
                    {
                        for (int n = 0; n < 4; n++)
                        {
                            sum0[n] += r0[n] * kptr[n];
                        }
                        kptr += 4;
                        r0 += 4;
                    }

                    for (int n = 0; n < 4; n++)
                    {
                        output0_tm[n] = sum0[n];
                    }
#endif // __AVX__ || __SSE__
                    output0_tm += 36;
                }
            }
        }
    }
    // END dot

    // BEGIN transform output
    float* top_blob_bordered = NULL;
    if (outw_align == outw && outh_align ==
outh)
    {
        // Output already has the aligned shape: write straight into top_blob.
        top_blob_bordered = top_blob;
    }
    else
    {
        // Otherwise write into the scratch buffer and crop afterwards.
        top_blob_bordered = output_bordered;
    }
    {
        // AT
        // const float itm[4][6] = {
        //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
        //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
        // };

        // 0 = r00 + r01 + r02 + r03 + r04
        // 1 = r01 - r02 + 2 * (r03 - r04)
        // 2 = r01 + r02 + 4 * (r03 + r04)
        // 3 = r01 - r02 + 8 * (r03 - r04) + r05

        int w_tm = outw_align / 4 * 6;
        int h_tm = outh_align / 4 * 6;

        int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 6;

        const int tiles = nColBlocks * nRowBlocks;

#pragma omp parallel for num_threads(num_thread)
        for (int p = 0; p < outch; p++)
        {
            float* out_tile = top_blob_tm + 36 * tiles * p;

            // Each 6x6 tile produces a 4x4 output block: four output rows.
            float* outRow0 = top_blob_bordered + outw_align * outh_align * p;
            float* outRow1 = outRow0 + outw_align;
            float* outRow2 = outRow0 + outw_align * 2;
            float* outRow3 = outRow0 + outw_align * 3;

            const float bias0 = bias ? bias[p] : 0.f;

            for (int j = 0; j < nColBlocks; j++)
            {
                for (int i = 0; i < nRowBlocks; i++)
                {
                    // TODO AVX2
                    float s0[6], s1[6], s2[6], s3[6], s4[6], s5[6];
                    float w0[6], w1[6], w2[6], w3[6];
                    float d0[4], d1[4], d2[4], d3[4], d4[4], d5[4];
                    float o0[4], o1[4], o2[4], o3[4];

                    // load
                    for (int n = 0; n < 6; n++)
                    {
                        s0[n] = out_tile[n];
                        s1[n] = out_tile[n + 6];
                        s2[n] = out_tile[n + 12];
                        s3[n] = out_tile[n + 18];
                        s4[n] = out_tile[n + 24];
                        s5[n] = out_tile[n + 30];
                    }

                    // w = A_T * W
                    for (int n = 0; n < 6; n++)
                    {
                        w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
                        w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n];
                        w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n];
                        w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + s5[n];
                    }

                    // transpose w to w_t
                    {
                        d0[0] = w0[0];
                        d0[1] = w1[0];
                        d0[2] = w2[0];
                        d0[3] = w3[0];
                        d1[0] = w0[1];
                        d1[1] = w1[1];
                        d1[2] = w2[1];
                        d1[3] = w3[1];
                        d2[0] = w0[2];
                        d2[1] = w1[2];
                        d2[2] = w2[2];
                        d2[3] = w3[2];
                        d3[0] = w0[3];
                        d3[1] = w1[3];
                        d3[2] = w2[3];
                        d3[3] = w3[3];
                        d4[0] = w0[4];
                        d4[1] = w1[4];
                        d4[2] = w2[4];
d4[3] = w3[4]; d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5]; } // Y = A_T * w_t for (int n = 0; n < 4; n++) { o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n]; o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n]; o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n]; o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n]; } // save to top blob tm for (int n = 0; n < 4; n++) { outRow0[n] = o0[n] + bias0; outRow1[n] = o1[n] + bias0; outRow2[n] = o2[n] + bias0; outRow3[n] = o3[n] + bias0; } out_tile += 36; outRow0 += 4; outRow1 += 4; outRow2 += 4; outRow3 += 4; } outRow0 += outw_align * 3; outRow1 += outw_align * 3; outRow2 += outw_align * 3; outRow3 += outw_align * 3; } } } // END transform output if (outw_align != outw || outh_align != outw) { delete_0_3D(top_blob, top_blob_bordered, outh_align, outw_align, outh, outw, outch, 0, 0); } } void conv3x3s1_winograd43_transform_kernel_sse(const float* kernel, float* kernel_wino, int inch, int outch) { float* kernel_tm = ( float* )sys_malloc((unsigned long)6 * 6 * inch * outch * sizeof(float)); // G const float ktm[6][3] = { {1.0f / 4, 0.0f, 0.0f}, {-1.0f / 6, -1.0f / 6, -1.0f / 6}, {-1.0f / 6, 1.0f / 6, -1.0f / 6}, {1.0f / 24, 1.0f / 12, 1.0f / 6}, {1.0f / 24, -1.0f / 12, 1.0f / 6}, {0.0f, 0.0f, 1.0f}}; #pragma omp parallel for for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm + p * inch * 36 + q * 36; // transform kernel const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[6][3] = {0}; for (int i = 0; i < 6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 6; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 6; i++) { kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * 
ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // Re-interleave kernel_tm into kernel_wino: for each of the 9 position
    // groups r, pack 4 consecutive transformed values per (outch, inch)
    // pair, blocked by 8 output channels first for the widest dot kernel.
    float* kernel_tm_test = kernel_wino;

    for (int r = 0; r < 9; r++)
    {
        int p = 0;
        for (; p + 7 < outch; p += 8)
        {
            const float* kernel0 = ( const float* )kernel_tm + p * inch * 36;
            const float* kernel1 = ( const float* )kernel_tm + (p + 1) * inch * 36;
            const float* kernel2 = ( const float* )kernel_tm + (p + 2) * inch * 36;
            const float* kernel3 = ( const float* )kernel_tm + (p + 3) * inch * 36;
            const float* kernel4 = ( const float* )kernel_tm + (p + 4) * inch * 36;
            const float* kernel5 = ( const float* )kernel_tm + (p + 5) * inch * 36;
            const float* kernel6 = ( const float* )kernel_tm + (p + 6) * inch * 36;
            const float* kernel7 = ( const float* )kernel_tm + (p + 7) * inch * 36;

            float* ktmp = kernel_tm_test + p / 8 * inch * 32;

            for (int q = 0; q < inch; q++)
            {
                // 4 values from each of the 8 output channels -> 32 floats.
                ktmp[0] = kernel0[r * 4 + 0];
                ktmp[1] = kernel0[r * 4 + 1];
                ktmp[2] = kernel0[r * 4 + 2];
                ktmp[3] = kernel0[r * 4 + 3];
                ktmp[4] = kernel1[r * 4 + 0];
                ktmp[5] = kernel1[r * 4 + 1];
                ktmp[6] = kernel1[r * 4 + 2];
                ktmp[7] = kernel1[r * 4 + 3];
                ktmp[8] = kernel2[r * 4 + 0];
                ktmp[9] = kernel2[r * 4 + 1];
                ktmp[10] = kernel2[r * 4 + 2];
                ktmp[11] = kernel2[r * 4 + 3];
                ktmp[12] = kernel3[r * 4 + 0];
                ktmp[13] = kernel3[r * 4 + 1];
                ktmp[14] = kernel3[r * 4 + 2];
                ktmp[15] = kernel3[r * 4 + 3];
                ktmp[16] = kernel4[r * 4 + 0];
                ktmp[17] = kernel4[r * 4 + 1];
                ktmp[18] = kernel4[r * 4 + 2];
                ktmp[19] = kernel4[r * 4 + 3];
                ktmp[20] = kernel5[r * 4 + 0];
                ktmp[21] = kernel5[r * 4 + 1];
                ktmp[22] = kernel5[r * 4 + 2];
                ktmp[23] = kernel5[r * 4 + 3];
                ktmp[24] = kernel6[r * 4 + 0];
                ktmp[25] = kernel6[r * 4 + 1];
                ktmp[26] = kernel6[r * 4 + 2];
                ktmp[27] = kernel6[r * 4 + 3];
                ktmp[28] = kernel7[r * 4 + 0];
                ktmp[29] = kernel7[r * 4 + 1];
                ktmp[30] = kernel7[r * 4 + 2];
                ktmp[31] = kernel7[r * 4 + 3];

                ktmp += 32;
                kernel0 += 36;
                kernel1 += 36;
                kernel2 += 36;
                kernel3 += 36;
                kernel4 += 36;
                kernel5 += 36;
                kernel6 += 36;
                kernel7 += 36;
            }
        }

        // Remaining output channels in groups of 4.
        for (; p + 3 < outch; p += 4)
        {
            const float* kernel0 = ( const float*
)kernel_tm + p * inch * 36; const float* kernel1 = ( const float* )kernel_tm + (p + 1) * inch * 36; const float* kernel2 = ( const float* )kernel_tm + (p + 2) * inch * 36; const float* kernel3 = ( const float* )kernel_tm + (p + 3) * inch * 36; float* ktmp = kernel_tm_test + (p / 8 + (p % 8) / 4) * inch * 16; for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[r * 4 + 0]; ktmp[1] = kernel0[r * 4 + 1]; ktmp[2] = kernel0[r * 4 + 2]; ktmp[3] = kernel0[r * 4 + 3]; ktmp[4] = kernel1[r * 4 + 0]; ktmp[5] = kernel1[r * 4 + 1]; ktmp[6] = kernel1[r * 4 + 2]; ktmp[7] = kernel1[r * 4 + 3]; ktmp[8] = kernel2[r * 4 + 0]; ktmp[9] = kernel2[r * 4 + 1]; ktmp[10] = kernel2[r * 4 + 2]; ktmp[11] = kernel2[r * 4 + 3]; ktmp[12] = kernel3[r * 4 + 0]; ktmp[13] = kernel3[r * 4 + 1]; ktmp[14] = kernel3[r * 4 + 2]; ktmp[15] = kernel3[r * 4 + 3]; ktmp += 16; kernel0 += 36; kernel1 += 36; kernel2 += 36; kernel3 += 36; } } for (; p < outch; p++) { const float* kernel0 = ( const float* )kernel_tm + p * inch * 36; float* ktmp = kernel_tm_test + (p / 8 + (p % 8) / 4 + p % 4) * inch * 4; for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[r * 4 + 0]; ktmp[1] = kernel0[r * 4 + 1]; ktmp[2] = kernel0[r * 4 + 2]; ktmp[3] = kernel0[r * 4 + 3]; ktmp += 4; kernel0 += 36; } } kernel_tm_test += 4 * inch * outch; } free(kernel_tm); } int wino_conv_hcl_prerun(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param) { int batch = input_tensor->dims[0]; int input_c = input_tensor->dims[1]; int input_h = input_tensor->dims[2]; int input_w = input_tensor->dims[3]; int output_c = output_tensor->dims[1]; int output_h = output_tensor->dims[2]; int output_w = output_tensor->dims[3]; int pad_h = param->pad_h0; int pad_w = param->pad_w0; float* kernel = ( float* )filter_tensor->data; if (!priv_info->external_interleave_mem) { int mem_size = get_private_mem_size(filter_tensor, param); void* mem = sys_malloc(mem_size); 
priv_info->interleave_buffer = mem;
        priv_info->interleave_buffer_size = mem_size;
    }

    // Tiling geometry: TILE x TILE output blocks, input padded to
    // (TILE*block + 2) per side for the 6x6 overlapping input tiles.
    int block_h = (output_h + TILE - 1) / TILE;
    int block_w = (output_w + TILE - 1) / TILE;
    int block = block_h * block_w;

    int padded_inh = TILE * block_h + 2;
    int padded_inw = TILE * block_w + 2;
    int pad_inhw = padded_inh * padded_inw;

    int outw = block_w * TILE;
    int outh = block_h * TILE;

    // Zero-filled padded input; pad_0_align_* only writes the payload, so
    // the border must be pre-zeroed here.
    priv_info->input_pad = ( float* )sys_malloc((unsigned long)batch * input_c * pad_inhw * sizeof(float));
    memset(priv_info->input_pad, 0, (unsigned long)batch * input_c * pad_inhw * sizeof(float));
    priv_info->dot_block = ( float* )sys_malloc(ELEM_SIZE * (unsigned long)block * output_c * sizeof(float));
    priv_info->transform_input = ( float* )sys_malloc(ELEM_SIZE * (unsigned long)block * input_c * sizeof(float));
    priv_info->output_bordered = NULL;
    if (outw != output_w || outh != output_h)
    {
        // Extra buffer for the aligned output when it must be cropped.
        priv_info->output_bordered = ( float* )sys_malloc((unsigned long)outw * outh * output_c * sizeof(float));
    }

    conv3x3s1_winograd43_transform_kernel_sse(kernel, ( float* )priv_info->interleave_buffer, input_c, output_c);

    return 0;
}

/*
 * Release all buffers allocated by wino_conv_hcl_prerun. The interleave
 * buffer is only freed when it was allocated internally. Returns 0.
 */
int wino_conv_hcl_postrun(struct conv_priv_info* priv_info)
{
    if (!priv_info->external_interleave_mem && priv_info->interleave_buffer != NULL)
    {
        sys_free(priv_info->interleave_buffer);
        priv_info->interleave_buffer = NULL;
    }

    if (priv_info->input_pad)
    {
        sys_free(priv_info->input_pad);
        priv_info->input_pad = NULL;
    }

    if (priv_info->dot_block)
    {
        sys_free(priv_info->dot_block);
        priv_info->dot_block = NULL;
    }

    if (priv_info->transform_input)
    {
        sys_free(priv_info->transform_input);
        priv_info->transform_input = NULL;
    }

    if (priv_info->output_bordered)
    {
        sys_free(priv_info->output_bordered);
        priv_info->output_bordered = NULL;
    }

    return 0;
}

/*
 * Execute the Winograd F(4,3) convolution: pad input, run the transformed
 * convolution per batch, then apply the activation. Returns 0.
 */
int wino_conv_hcl_run(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* bias_tensor, struct tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param, int num_thread, int cpu_affinity)
{
    /* param */
    int kernel_h =
param->kernel_h;
    int kernel_w = param->kernel_w;
    int stride_h = param->stride_h;
    int stride_w = param->stride_w;
    int dilation_h = param->dilation_h;
    int dilation_w = param->dilation_w;
    int pad_h0 = param->pad_h0;
    int pad_w0 = param->pad_w0;
    int act_type = param->activation;
    int group = param->group;

    int batch = input_tensor->dims[0];
    int in_c = input_tensor->dims[1];
    int in_c_g = input_tensor->dims[1] / group;
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];
    int input_size = in_c * in_h * in_w;
    int input_size_g = in_c_g * in_h * in_w;
    int kernel_size = in_c * kernel_h * kernel_w;

    int out_c = output_tensor->dims[1];
    int out_h = output_tensor->dims[2];
    int out_w = output_tensor->dims[3];
    int out_hw = out_h * out_w;
    int output_size = out_c * out_h * out_w;
    int out_c_align = ((out_c + 3) & -4);

    /* wino param */
    int block_h = (out_h + TILE - 1) / TILE;
    int block_w = (out_w + TILE - 1) / TILE;
    int block_hw = block_h * block_w;
    int padded_in_h = block_h * TILE + 2;
    int padded_in_w = block_w * TILE + 2;
    int padded_in_hw = padded_in_h * padded_in_w;

    /* buffer addr */
    float* input = ( float* )input_tensor->data;
    float* output = ( float* )output_tensor->data;
    float* biases = NULL;
    if (bias_tensor != NULL)
        biases = ( float* )bias_tensor->data;

    // NOTE(review): the group loop pads the full in_c planes each iteration
    // and the padded-input offset has no per-group term; this looks correct
    // only for group == 1 — TODO confirm whether grouped convolution is ever
    // routed through this kernel.
    for (int i = 0; i < batch; i++)
    {
        for (int g = 0; g < group; g++)
        {
            // Zero-pad the input to the aligned (padded_in_h, padded_in_w)
            // shape expected by conv3x3s1_winograd43_sse.
            pad_0_align_3D((float*)priv_info->input_pad + i * in_c * padded_in_h * padded_in_w, input + i * in_c * in_h * in_w, in_h, in_w, padded_in_h, padded_in_w, in_c, pad_h0, pad_w0);
            conv3x3s1_winograd43_sse((float*)priv_info->input_pad + i * in_c * padded_in_h * padded_in_w + g * input_size_g, output + i * out_c * out_h * out_w, priv_info->interleave_buffer, priv_info->dot_block, priv_info->transform_input, priv_info->output_bordered, biases, padded_in_w, padded_in_h, in_c, out_w, out_h, out_c, num_thread);
        }
    }

    // act_type < 0 means no activation; >= 0 applies ReLU / clamped ReLU.
    if (act_type >= 0)
    {
        relu(output, batch * output_size, act_type);
    }

    return 0;
}
fourier-parallel-pi-xeonphi-openmp.c
// CS 87 - Final Project
// Maria-Elena Solano
//
// Radix-2 Cooley-Tukey Fourier Transform on C^n - parallel 'pi' version
// on Xeon Phi
//

#include <stdio.h>             // C's standard I/O library
#include <stdlib.h>            // C's standard library
#include <stdint.h>            // C's exact width int types
#include <string.h>            // C's standard string library
#include <time.h>              // C's time types (for random init)
#include <math.h>              // C's math library
#include <unistd.h>            // C's POSIX API
#include <sys/time.h>          // System time types
#include <omp.h>               // OpenMP library

// macro/constant definitions
// (each macro flushes its stream so output interleaves predictably
//  across threads)
#define perror_out(X)          perror(X), fflush(stderr)
#define stderr_out(...)        fprintf(stderr, __VA_ARGS__), \
                               fflush(stderr)
#define print_out(...)         printf(__VA_ARGS__), fflush(stdout)
#define copy_to(X,Y,Z)         memcpy((X), (Y), sizeof(data_t)*(Z))

// data structures

// unit data type: one single-precision complex number
typedef struct complex{
  float re;                    // real part
  float im;                    // imaginary part
} data_t;

// simple struct to hold transform info
typedef struct tr tr_t;
typedef struct tr{
  uint32_t N;                  // input size
  uint32_t P;                  // number of processors
  uint32_t Pi;                 // processor ID
  data_t*  in;                 // input data
  data_t*  out;                // output
  data_t*  tmp_in;             // input scratchpad
  data_t*  tmp_out;            // output scratchpads
  uint32_t test_mode;          // test mode?
  uint32_t no_header;          // no timing headers?
} tr_t;

// simple encapsulating struct for timing
typedef struct timer{
  double start, stop;          // start and stop time
  double elapsed;              // elapsed time (if any), in millisecs
} tmr_t;

// function declarations
// setting up the transform
int      setup_from_args (tr_t* t, int argc, char** argv);
void     show_usage      ();
// running the transform
int      run             (tr_t t);
int      initialize_data (tr_t* t);
void     cleanup_data    (tr_t* t);
void     butterfly       (data_t* out, data_t* in, uint32_t size, uint32_t N);
void     butterfly_left  (data_t* out, data_t* in, uint32_t size, uint32_t N);
void     butterfly_right (data_t* out, data_t* in, uint32_t size, uint32_t N);
// complex arithmetic
data_t   add             (data_t a, data_t b);
data_t   sub             (data_t a, data_t b);
data_t   mul             (data_t a, data_t b);
data_t   omega           (uint32_t N, uint32_t k);
// printing and verifying
void     print_input     (tr_t* t);
void     print_output    (tr_t* t);
void     verify_results  (tr_t* t);
// timing
void     timer_start     (tmr_t* tm);
void     timer_stop      (tmr_t* tm);
// misc
void     swap_scratchpads(tr_t* t);
int      is_power_of_two (int x);
uint32_t hash            (uint32_t x);
uint32_t ilog2           (uint32_t x);
uint32_t bit_reverse     (uint32_t x, uint32_t N);


int main(int argc, char** argv){
  tr_t t;                      // main transform object

  // (1) setup the transform from the command line args (or ret -1 if err)
  if(setup_from_args(&t, argc, argv)){
    exit(EXIT_FAILURE);
  }

  // (2) run the transform according to the given args (or ret -1 if err)
  if(run(t)){
    exit(EXIT_FAILURE);
  }

  // (3) return
  exit(EXIT_SUCCESS);
}

// This function parses the provided command line args into the given transform
// object, and initializes it accordingly. Returns 0 if success, or -1 if err).
//
// t:       (ptr to) tr_t object to update
// argc:    number of command line arguments
// argv:    array of strings containing the command line args
//
// returns: 0 if success, or -1 if invalid command line args.
//
int setup_from_args(tr_t* t, int argc, char** argv){
  int ret;                     // return value from getopt, and
  int num = 0;                 // number to be retrieved.

  // First, zero all the entries of the tr_t (so we can tell whether or not
  // the entries were filled afterwards simply by checking if they're still 0)
  memset(t, 0, sizeof(tr_t));

  // Then, greedily read all the command line options provided.
  while((ret = getopt(argc, argv, "n:p:to")) != -1){
    switch(ret){
      // If option -n, grab the arg, which should be a nonnegative power of 2
      // greater than 1 (otherwise return -1 err). Note atoi() returning 0 on
      // a non-numeric arg is caught by the first !(...) test.
      case 'n':{
        if(!(num = atoi(optarg)) || !(num > 1) || !is_power_of_two(num)){
          stderr_out("Invalid input size (should be 2^i for i>0)\n");
          show_usage();
          goto err;
        }
        t->N = num;
        break;
      }
      // If option -p, grab the arg, which should be 1 or a nonneg power of 2
      // (otherwise return -1 err). Note p == 1 is accepted here.
      case 'p':{
        if(!(num = atoi(optarg)) || !(num > 0) || !is_power_of_two(num)){
          stderr_out("Invalid number of procs (should be 2^i for i>0)\n");
          show_usage();
          goto err;
        }
        t->P = num;
        break;
      }
      // If option -o, set t.no_header to 1.
      case 'o':{
        t->no_header = 1;
        break;
      }
      // If option -t, set t.N to 8 and t.test to 'true', and notify user.
      case 't':{
        print_out("Test mode (ignoring provided input size, if any)...\n");
        t->N         = 8;
        t->test_mode = 1;
        break;
      }
      // if unknown or missing arg, show usage and return -1 (error)
      case '?':{
        stderr_out("Unknown or missing arg %c\n", optopt);
        show_usage();
        goto err;
        break;
      }
    }
  }

  // Finally, validate the options

  // If the -n option is missing, notify the user, show usage, and return -1.
  if(!t->N){
    stderr_out("Missing option: -n\n");
    show_usage();
    goto err;
  }
  // If the -p option is missing, notify the user, show usage, and return -1.
  if(!t->P){
    stderr_out("Missing option: -p\n");
    show_usage();
    goto err;
  }
  // If more processors than inputs, return -1.
  if(t->P > t->N){
    stderr_out("More processors than inputs!\n");
    show_usage();
    goto err;
  }
  // If too many processors, return -1 (hardware bound of the target Xeon Phi).
  if(t->P > 61){
    stderr_out("Too many processors! (Only up to 61 cores available)\n");
    show_usage();
    goto err;
  }

  return 0;

 err:
  stderr_out("Could not setup the transform from the cmdline args\n");
  return -1;
}

// This function initializes the given transform's data. Returns 0 if success
// or -1 if err.
//
// t:       (ptr to) tr_t object to update
//
// returns: 0 if success, or -1 if error.
//
int initialize_data(tr_t* t){
  uint32_t bit;

  // We need:
  // - N entries for the input, and
  if((t->in = (data_t*)malloc(sizeof(data_t)*t->N)) == NULL){
    perror_out("malloc error");
    goto err;
  }
  // - N entries for the output.
  if((t->out = (data_t*)malloc(sizeof(data_t)*t->N)) == NULL){
    perror_out("malloc error");
    goto err;
  }

  // Initialize the input with Bernoulli deviates from the L2 unit circle
  // --i.e. each element drawn from the distribution 1/sqrt{N}*[-1,1].
  // (seeded from a hash of the current time)
  if(!t->test_mode){
    for(srand(hash(time(NULL))), bit = 0; bit < t->N; bit++){
      t->in[bit].re = (rand()/(RAND_MAX/2.0)-1.0)/sqrt(t->N);
      t->in[bit].im = (rand()/(RAND_MAX/2.0)-1.0)/sqrt(t->N);
    }
  }
  // (unless in debug mode, in which case the values are always the same
  //  test case: 0,1,0,1,0,1,0,1 (the output should be 4,0,0,0,-4,0,0,0))
  else{
    t->in[0].re = 0; t->in[0].im = 0;
    t->in[1].re = 1; t->in[1].im = 0;
    t->in[2].re = 0; t->in[2].im = 0;
    t->in[3].re = 1; t->in[3].im = 0;
    t->in[4].re = 0; t->in[4].im = 0;
    t->in[5].re = 1; t->in[5].im = 0;
    t->in[6].re = 0; t->in[6].im = 0;
    t->in[7].re = 1; t->in[7].im = 0;
    print_input(t);
  }

  // And return
  return 0;

 err:
  stderr_out("Could not initialize the data\n");
  return -1;
}

// This function frees all the data allocated by the given transform.
// (Safe to call more than once: freed pointers are reset to NULL.)
//
// t:       (ptr to) tr_t object to update
//
void cleanup_data(tr_t* t){
  // Free all the allocated data (if any)
  if(t->in != NULL){
    free(t->in);
    t->in = NULL;
  }
  if(t->out != NULL){
    free(t->out);
    t->out = NULL;
  }

  return;
}

// This function shows the cmdline interface usage.
//
void show_usage(){
  print_out("\nusage:\n"
            " fourier-parallel-pi-xeonphi-openmp { -n <n> -p <p> | -t }\n"
            "\noptions:\n"
            " -n <n> power of two input size.\n"
            " -p <p> power of two number of processors (less than n).\n"
            " -o omit timing headers\n"
            " -t compare against precomputed input/output.\n"
            "\n");
}

// This function runs the given transform on P separate processors. Returns 0
// if success or -1 if err.
//
// The transform proceeds in two stages: a "tree/funnel" stage of log P
// iterations in which each thread computes one half-butterfly of its
// assigned (shrinking) segment, followed by a "cylinder/tube" stage of
// log(N/P) iterations in which each thread computes full butterflies over
// its own N/P-element segment, independently of the others.
//
// t:       copy of tr_t object to use
//
// returns: 0 if success, or -1 if error.
//
int run(tr_t t_shared){
  uint32_t size, iter, offset, bit;      // butterfly size, offset and iter,
  uint32_t which_butterfly, which_half;  // which butterfly, and which half,
  tmr_t    tm_funnel, tm_tube;           // tmr_t objects (for timing)
  tr_t     t;                            // per-thread copy of shared tr_t.

  // Initialize the data (or ret -1 err, cleaning up any malloc'd data first)
  if(initialize_data(&t_shared)){
    cleanup_data(&t_shared);
    goto err;
  }

  // Spawn P threads; each gets private copies of the loop/timing locals.
  omp_set_num_threads(t_shared.P);
  #pragma omp parallel private(size, iter, offset, bit, \
                               which_butterfly, which_half, \
                               tm_funnel, tm_tube, t)
  {
    // Populate the local tr_t object, and determine which Pi is this.
    // (the in/out pointers remain shared across threads; only the
    //  scratchpads allocated below are per-thread)
    t    = t_shared;
    t.Pi = omp_get_thread_num();

    // We need:
    // - N entries for the input scratchpad, and
    if((t.tmp_in = (data_t*)malloc(sizeof(data_t)*t.N)) == NULL){
      perror_out("malloc error");
    }
    // - N entries for the output scratchpad.
    if((t.tmp_out = (data_t*)malloc(sizeof(data_t)*t.N)) == NULL){
      perror_out("malloc error");
    }

    // Copy the full input to the input scratchpad.
    copy_to(t.tmp_in, t.in, t.N);

    // (1) Tree stage:
    // ---------------

    // Start the timer for the funnel stage
    timer_start(&tm_funnel);

    // For the first log P iters
    // (i.e. butterfly sizes N, N/2, ..., 2(N/P),
    //  iteration stage log P, log P - 1, ..., 1),
    for(size = t.N, iter = ilog2(t.P); size > t.N/t.P; size /= 2, iter--){
      // Determine _which_ butterfly to compute, depending on (Pi >> iter)
      which_butterfly = (t.Pi >> iter);

      // Determine _which_ segment_ of size 'size' to compute the butterfly on
      offset = which_butterfly * size;

      // Determine _which half_, depending on the parity of the 1st, 2nd, ...
      // most significant bit of Pi.
      which_half = ((t.Pi >> (iter - 1)) % 2 == 0);

      // And compute the butterfly accordingly
      if(which_half){
        butterfly_left(
          t.tmp_out + offset,  // output to the _1st_ half of the scratchpad
          t.tmp_in  + offset,  // and reading from the input scratchpad.
          size, t.N);
      }
      else{
        butterfly_right(
          t.tmp_out + offset,  // output to the _2nd_ half of the scratchpad
          t.tmp_in  + offset,  // and reading from the input scratchpad.
          size, t.N);
      }

      // And swap scratchpads, so that the new results become the input for
      // the next iteration.
      swap_scratchpads(&t);
    }

    // Stop the timer for the tree stage
    timer_stop(&tm_funnel);

    // (2) Cylinder stage
    // ------------------

    // Start the timer for the tube stage
    timer_start(&tm_tube);

    // For the last log N/P iters
    // (i.e. butterfly sizes N/P, (N/P)/2, ..., 2),
    // we work only on the portion of the output assigned to this processor
    for(size = t.N/t.P; size > 1; size /= 2){
      // Compute 1, 2, ..., (N/P)/2 butterflies of size N/P, (N/P)/2, ..., 2
      // over _consecutive_ intervals across the assigned segment (of size N/P)
      // (which recall starts from (t.N/t.P)).
      for(offset=(t.N/t.P)*t.Pi; offset<(t.N/t.P)*t.Pi+t.N/t.P; offset+=size){
        butterfly(
          t.tmp_out + offset,  // output to the _assigned_ segment,
          t.tmp_in  + offset,  // and reading from the _assigned_ segment.
          size, t.N);
      }

      // And swap scratchpads, so that the new results become the input for
      // the next iteration.
      swap_scratchpads(&t);
    }

    // Stop the timer for the tube stage
    timer_stop(&tm_tube);

    // If not in test mode, show elapsed times (total, funnel and tube stages)
    if(!t.test_mode && t.Pi == 0){
      if(!t.no_header){
        print_out("n\tp\ttime (total)\t"
                  "time (stage 1)\ttime (stage 2)\n");
      };
      print_out("%u\t%u\t%lf\t%lf\t%lf\n",
        t.N, t.P,
        tm_funnel.elapsed + tm_tube.elapsed,
        tm_funnel.elapsed, tm_tube.elapsed);
    }
    // Otherwise, copy the result from the _assigned_ segment of the input
    // scratchpad (which will contain the new results at this point) to the
    // output, in bit-reversed order.
    // NOTE(review): in non-test mode thread 0 takes the timing branch above
    // and never copies its own segment, so out[] is left incomplete; this is
    // unobservable today because out is only printed/verified in test mode,
    // but confirm before relying on out elsewhere.
    else{
      for(bit = (t.N/t.P)*t.Pi; bit < (t.N/t.P)*t.Pi+t.N/t.P; bit++)
        t.out[bit_reverse(bit, ilog2(t.N))] = t.tmp_in[bit];
    }

    // Free the data
    if(t.tmp_in != NULL){
      free(t.tmp_in);
      t.tmp_in = NULL;
    }
    if(t.tmp_out != NULL){
      free(t.tmp_out);
      t.tmp_out = NULL;
    }
  }

  // If in test mode, print out the result, and verify that it is correct.
  if(t_shared.test_mode){
    print_output(&t_shared);
    verify_results(&t_shared);
  }

  // Cleanup the data, and return
  cleanup_data(&t_shared);
  return 0;

 err:
  stderr_out("Could not run the transform\n");
  return -1;
}

// This function computes an n-point butterfly over the given input array,
// placing the results on the given output array.
//
// out,in:  output and input arrays
// size:    size of the butterfly
// N:       input size of the broader FFT
//
void butterfly(data_t* out, data_t* in, uint32_t size, uint32_t N){
  // Compute the left butterfly (note it only writes on the _1st_ half of out)
  butterfly_left (out, in, size, N);

  // And the right butterfly (note it only writes on the _2nd_ half of out)
  butterfly_right(out, in, size, N);

  return;
}

// This function computes the left half of an n-point butterfly. Note it only
// writes to the _first_ half of the output.
//
// out,in:  output and input arrays
// size:    size of the butterfly
// N:       input size of the broader FFT
//
void butterfly_left(data_t* out, data_t* in, uint32_t size, uint32_t N){
  uint32_t bit;

  // For each bit in the segment
  for(bit = 0; bit < size/2; bit++){
    out[bit] =           // set the current (output) bit to
      add(               // the sum of
        in[bit],         // the current (input) bit, and
        in[bit+size/2]); // the (size/2)-next one.
  }

  return;
}

// This function computes the right half of an n-point butterfly. Note it only
// writes to the _second_ half of the output.
//
// out,in:  output and input arrays
// size:    size of the butterfly
// N:       input size of the broader FFT
//
void butterfly_right(data_t* out, data_t* in, uint32_t size, uint32_t N){
  uint32_t bit;

  // For each bit in the segment (note we are offsetting out by size/2!)
  for(out += size/2, bit = 0; bit < size/2; bit++){
    out[bit] =            // set the current (new) bit to
      mul(                // the multiplication of
        sub(              // the difference of
          in[bit],        // the (size/2)-prev bit, and
          in[bit+size/2]),// the next one
        omega(N,          // times the Nth root of unity,
          bit*(N/size))); // to the power of 0,1,2 first,
  }                       // 0,2,4 second, etc.

  return;
}

// This function returns (by value) the sum of the two given complex numbers.
//
// a,b:     operands (x+yi,z+wi) (of type data_t)
//
// returns: the sum a+b = (x+z)+(y+w)i
//
data_t add(data_t a, data_t b){
  data_t c;
  c.re = a.re + b.re;
  c.im = a.im + b.im;
  return c;
}

// This function returns (by value) the sum a+(-b) of the given complex nums.
//
// (Note: this can be overriden to provide support for more general transforms
//  on arbitrary fields)
//
// a,b:     operands (x+yi,z+wi) (of type data_t)
//
// returns: the sum a+(-b) = (x-z)+(y-w)i
//
data_t sub(data_t a, data_t b){
  data_t c;
  c.re = a.re - b.re;
  c.im = a.im - b.im;
  return c;
}

// This function returns (by value) the mult a*b of the given complex numbers.
//
// (Note: this can be overriden to provide support for more general transforms
//  on arbitrary fields)
//
// a,b:     operands (x+yi,z+wi) (of type data_t)
//
// returns: their mult a*b = (xz-yw) + (xw+yz)i
//
data_t mul(data_t a, data_t b){
  data_t c;
  c.re = a.re*b.re - a.im*b.im;
  c.im = a.re*b.im + a.im*b.re;
  return c;
}

// This function returns (by value) the primitive N-th root of unity of the
// complex field \mathbb{C} the power of k, which is given by
//
//   e^{-2\pi k/N} = (cos 2\pi/N - i sin 2\pi/N)^k  (by Euler's formula)
//                 =  cos 2\pi/N*k - i sin 2\pi/N*k (by De Moivre's formula)
//
// (Note: this can be overriden to provide support for more general transforms
//  on arbitrary fields)
//
// N:       Order of the cyclic group.
// k:       Power to raise the root of unity to.
//
// returns: N-th primitive root of unity of \mathbb{C}, raised
//          to the power of k.
//
data_t omega(uint32_t N, uint32_t k){
  data_t o;
  o.re =  cos(2.0*M_PI/N*k);
  o.im = -sin(2.0*M_PI/N*k);
  return o;
}

// This function prints out the input of the transform.
//
// t:       (ptr to) tr_t object to use.
//
void print_input(tr_t* t){
  uint32_t bit;                // entry to print out

  for(print_out("Input:\n"), bit = 0; bit < t->N; bit++){
    print_out("%.1f+%.1fi, ", t->in[bit].re, t->in[bit].im);
  }
  print_out("\n");

  return;
}

// This function prints out the output of the transform.
//
// t:       (ptr to) tr_t object to use.
//
void print_output(tr_t* t){
  uint32_t bit;                // entry to print out

  for(print_out("Output:\n"), bit = 0; bit < t->N; bit++){
    print_out("%.1f+%.1fi, ", t->out[bit].re, t->out[bit].im);
  }
  print_out("\n");

  return;
}

// This function verifies that the output of the transform is correct.
//
// t:       (ptr to) tr_t object to use.
//
void verify_results(tr_t* t){
  // since the input is 0,1,0,1,0,1,0,1; the output should be 4,0,0,0,-4,0,0,0.
  print_out(
    (t->out[0].re == 4 && t->out[0].im == 0 &&
     t->out[1].re == 0 && t->out[1].im == 0 &&
     t->out[2].re == 0 && t->out[2].im == 0 &&
     t->out[3].re == 0 && t->out[3].im == 0 &&
     t->out[4].re ==-4 && t->out[4].im == 0 &&
     t->out[5].re == 0 && t->out[5].im == 0 &&
     t->out[6].re == 0 && t->out[6].im == 0 &&
     t->out[7].re == 0 && t->out[7].im == 0) ?
       "Output is correct. Test passed.\n\n" :
       "Output is incorrect! Test failed.\n\n");

  return;
}

// This function starts the given tmr_t object.
//
// Note: using OpenMP's omp_get_wtime for portability.
//
// timer:   (pointer to) tmr_t object to start.
//
void timer_start(tmr_t* tm){
  tm->start = omp_get_wtime();
  return;
}

// This function stops the given tmr_t object, and calculates the elapsed
// time since the last call to timer_start, in milliseconds.
//
// Note: using OpenMP's omp_get_wtime for portability.
//
// timer:   (pointer to) tmr_t object to stop.
//
void timer_stop(tmr_t* tm){
  tm->stop    = omp_get_wtime();
  tm->elapsed = (tm->stop - tm->start)*1000.0;
  return;
}

// This function swaps the input and output scratchpads of the given tr_t obj.
//
// t:       (ptr to) tr_t object to use.
//
void swap_scratchpads(tr_t* t){
  data_t* temp;
  temp       = t->tmp_in;
  t->tmp_in  = t->tmp_out;
  t->tmp_out = temp;
  return;
}

// This function determines whether or not the given number is a non-negative
// power of two.
//
// From: www.graphics.stanford.edu/~seander/bithacks.html#DetermineIfPowerOf2
//
// x:       value to query.
//
// returns: 0 if _not_ a non-negative power of 2, 1 otherwise.
//
int is_power_of_two(int x){
  return x && !(x & (x - 1));
}

// This function computes a hash of the given uint32 number.
// (From http://www.concentric.net/~ttwang/tech/inthash.htm)
//
// x:       number to hash.
//
// returns: uint32 hash of the given number.
//
uint32_t hash(uint32_t x){
  // Six mix rounds of Thomas Wang's 32-bit integer hash: each round folds
  // a shifted copy of the value back into itself (with an add or xor of a
  // fixed constant) so that every input bit influences every output bit.
  uint32_t h = x;
  h = (h + 0x7ed55d16U) + (h << 12);
  h = (h ^ 0xc761c23cU) ^ (h >> 19);
  h = (h + 0x165667b1U) + (h << 5);
  h = (h + 0xd3a2646cU) ^ (h << 9);
  h = (h + 0xfd7046c5U) + (h << 3);
  h = (h ^ 0xb55a4f09U) ^ (h >> 16);
  return h;
}

// This function computes the log2 of the given uint32_t number, via a
// De Bruijn multiplication: for a power-of-two input, multiplying by the
// De Bruijn constant 0x077CB531 and keeping the top 5 bits produces a
// distinct table index for each of the 32 possible set-bit positions.
//
// From: graphics.stanford.edu/~seander/bithacks.html#IntegerLogDeBruijn
//
// x:       value to compute (expected to be a power of two).
//
// returns: log2(x)
//
uint32_t ilog2(uint32_t x){
  static const int debruijn_pos[32] = {
     0,  1, 28,  2, 29, 14, 24,  3,
    30, 22, 20, 15, 25, 17,  4,  8,
    31, 27, 13, 23, 21, 19, 16,  7,
    26, 12, 18,  6, 11,  5, 10,  9
  };
  return debruijn_pos[(uint32_t)(x*0x077CB531U) >> 27];
}

// This function bit-reverses the given index (represented as a m-bit binary
// number), and returns the resulting value as an integer.
//
// The FFT's recursive factorization leaves the butterfly outputs in
// bit-reversed order (as log(N)-bit numbers), so e.g. for N = 8 the 1st
// butterfly output maps to slot 1 -> 001 -> 100 -> 4, the 3rd one to slot
// 3 -> 011 -> 110 -> 6, and so forth; this function computes that mapping.
//
// The swap network below comes from:
//
//   Dietz, H. (2002) The Aggregate Magic Algorithms. University of Kentucky.
//   http://aggregate.org/MAGIC/#Bit%20Reversal
//
// It reverses all 32 bits by exchanging ever-larger groups -- single bits,
// bit pairs, nibbles, bytes, and finally the two 16-bit halves -- in
// log 32 = 5 steps. Since that leaves the m interesting bits parked at the
// top of the word, the result is shifted back down by 32-m at the end
// (e.g. 110 0000 ... 0000 -> 110 -> 6).
//
// x:       Index to bit-reverse.
// m:       How many bits to use when representing the index.
//
// returns: Bit-reversed integer value of x, when represented
//          as a m-bit binary number.
//
uint32_t bit_reverse(uint32_t x, uint32_t m){
  x = ((x >> 1) & 0x55555555) | ((x & 0x55555555) << 1);  // swap adjacent bits
  x = ((x >> 2) & 0x33333333) | ((x & 0x33333333) << 2);  // swap bit pairs
  x = ((x >> 4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) << 4);  // swap nibbles
  x = ((x >> 8) & 0x00ff00ff) | ((x & 0x00ff00ff) << 8);  // swap bytes
  x = (x >> 16) | (x << 16);                              // swap halfwords
  return x >> (32-m);
}
quicksort.c
/*
 * quicksort.c: Example of QuickSort in OpenMP.
 *
 * (C) 2015 Mikhail Kurnosov <mkurnosov@gmail.com>
 */

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <sys/time.h>

enum { N = 2 * 1024 * 1024 };   /* number of elements to sort */
const int threshold = 1000;     /* subarrays shorter than this are sorted serially */

/* Returns the current wall-clock time in seconds (microsecond resolution). */
double wtime()
{
    struct timeval t;
    gettimeofday(&t, NULL);
    return (double)t.tv_sec + (double)t.tv_usec * 1E-6;
}

/* malloc() wrapper that terminates the program on allocation failure. */
void *xmalloc(size_t size)
{
    void *p = malloc(size);
    if (!p) {
        /* fixed typo: was "No enough memory" */
        fprintf(stderr, "Not enough memory\n");
        exit(EXIT_FAILURE);
    }
    return p;
}

/* Returns 1 iff v[0] <= v[1] <= ... <= v[n - 1] (trivially true for n < 2). */
int is_nondecreasing_sorted(double *v, int n)
{
    if (n < 2)
        return 1;
    // Non-decreasing sorting: v[0] <= v[1] <= ... <= v[n - 1]
    for (int i = 1; i < n; i++) {
        if (v[i - 1] > v[i])
            return 0;
    }
    return 1;
}

/* Exchanges elements v[i] and v[j]. */
void swap(double *v, int i, int j)
{
    double temp = v[i];
    v[i] = v[j];
    v[j] = temp;
}

/* Lomuto partition of v[low..high] around the pivot v[high];
 * returns the pivot's final index. */
int partition(double *v, int low, int high)
{
    double pivot = v[high];
    int i = low - 1;
    for (int j = low; j < high; j++) {
        if (v[j] <= pivot) {
            i++;
            swap(v, i, j);
        }
    }
    swap(v, i + 1, high);
    return i + 1;
}

/*
 * quicksort: Sorting n elements of array v in the non-decreasing order
 *            by quicksort algorithm (complexity in average case is O(n \log n)).
 */
void quicksort(double *v, int low, int high)
{
    if (low < high) {
        int k = partition(v, low, high);
        quicksort(v, low, k - 1);
        quicksort(v, k + 1, high);
    }
}

/*
 * quicksort_omp_tasks: Task-parallel quicksort. The left half is handed to a
 * new OpenMP task while the current thread recurses into the right half;
 * small subarrays fall back to the serial sort to bound task overhead.
 * Outstanding tasks are guaranteed complete by the implicit barrier at the
 * end of the enclosing parallel region (see run_parallel).
 */
void quicksort_omp_tasks(double *v, int low, int high)
{
    if (low < high) {
        if (high - low < threshold) {
            quicksort(v, low, high);
        } else {
            int k = partition(v, low, high);
#pragma omp task
            quicksort_omp_tasks(v, low, k - 1);
            quicksort_omp_tasks(v, k + 1, high);
        }
    }
}

/* Fills an array with a fixed pseudo-random sequence, sorts it serially,
 * verifies the result, and returns the elapsed sorting time in seconds. */
double run_serial()
{
    double *v = xmalloc(sizeof(*v) * N);
    srand(0);   /* fixed seed: serial and parallel runs sort identical data */
    for (int i = 0; i < N; i++)
        v[i] = rand() % 1000;

    double t = wtime();
    quicksort(v, 0, N - 1);
    t = wtime() - t;

    if (!is_nondecreasing_sorted(v, N)) {
        fprintf(stderr, "Verification FAILED (serial version)\n");
    }
    free(v);
    return t;
}

/* Same as run_serial(), but sorts with OpenMP tasks; returns elapsed seconds. */
double run_parallel()
{
    double *v = xmalloc(sizeof(*v) * N);
    srand(0);
    for (int i = 0; i < N; i++)
        v[i] = rand() % 1000;

    double t = wtime();
#pragma omp parallel
    {
        /* One thread starts the recursion; the team executes the tasks. */
#pragma omp single nowait
        quicksort_omp_tasks(v, 0, N - 1);
    }
    t = wtime() - t;

    if (!is_nondecreasing_sorted(v, N)) {
        fprintf(stderr, "Verification FAILED (parallel version)\n");
    }
    free(v);
    return t;
}

int main()
{
    /* fixed typo: was "Soring by QuickSort" */
    printf("Sorting by QuickSort, N = %d\n", N);
    double tserial = run_serial();
    double tparallel = run_parallel();
    printf("Execution time (serial): %.6f\n", tserial);
    printf("Execution time (parallel): %.6f\n", tparallel);
    printf("Speedup: %.2f\n", tserial / tparallel);
    return 0;
}
ompfor-static.c
/*
 * Static schedule
 */
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
/* Serial fallbacks so the example still builds when OpenMP is disabled
 * (the omp_* calls below were previously unguarded, unlike the include). */
static int omp_get_thread_num(void)  { return 0; }
static int omp_get_num_threads(void) { return 1; }
#endif

/*
 * Reports which thread executes each iteration of a statically scheduled
 * (chunk size 3) worksharing loop over [lower, upper) with the given stride.
 * Must be called from within a parallel region (the omp for is orphaned).
 * Returns 0. (fixed: was declared int but had no return statement)
 */
int foo(int lower, int upper, int stride)
{
    int i;

#pragma omp for schedule(static,3)
    for (i = lower; i < upper; i += stride) {
        printf("Iteration %2d is carried out by thread %2d\n",
               i, omp_get_thread_num());
    }

    return 0;
}

int main(void)
{
#pragma omp parallel
    {
        /* only one thread announces the team size */
#pragma omp single
        printf ("Using %d threads.\n",omp_get_num_threads());

        foo(0, 10, 2);
    }
    return 0;
}
mc.c
/***************************************************************************** * mc.c: h264 encoder library (Motion Compensation) ***************************************************************************** * Copyright (C) 2003-2008 x264 project * * Authors: Laurent Aimar <fenrir@via.ecp.fr> * Loren Merritt <lorenm@u.washington.edu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA. 
*****************************************************************************/ #include "common.h" #include <omp.h> #ifdef HAVE_MMX #include "x86/mc.h" #endif #ifdef ARCH_PPC #include "ppc/mc.h" #endif static inline void pixel_avg( uint8_t *dst, int i_dst_stride, uint8_t *src1, int i_src1_stride, uint8_t *src2, int i_src2_stride, int i_width, int i_height ) { int x, y; for( y = 0; y < i_height; y++ ) { for( x = 0; x < i_width; x++ ) { dst[x] = ( src1[x] + src2[x] + 1 ) >> 1; } dst += i_dst_stride; src1 += i_src1_stride; src2 += i_src2_stride; } } static inline void pixel_avg_wxh( uint8_t *dst, int i_dst, uint8_t *src1, int i_src1, uint8_t *src2, int i_src2, int width, int height ) { int x, y; for( y = 0; y < height; y++ ) { for( x = 0; x < width; x++ ) { dst[x] = ( src1[x] + src2[x] + 1 ) >> 1; } src1 += i_src1; src2 += i_src2; dst += i_dst; } } /* Implicit weighted bipred only: * assumes log2_denom = 5, offset = 0, weight1 + weight2 = 64 */ #define op_scale2(x) dst[x] = x264_clip_uint8( (src1[x]*i_weight1 + src2[x]*i_weight2 + (1<<5)) >> 6 ) static inline void pixel_avg_weight_wxh( uint8_t *dst, int i_dst, uint8_t *src1, int i_src1, uint8_t *src2, int i_src2, int width, int height, int i_weight1 ) { int y; const int i_weight2 = 64 - i_weight1; for( y = 0; y<height; y++, dst += i_dst, src1 += i_src1, src2 += i_src2 ) { op_scale2(0); op_scale2(1); if(width==2) continue; op_scale2(2); op_scale2(3); if(width==4) continue; op_scale2(4); op_scale2(5); op_scale2(6); op_scale2(7); if(width==8) continue; op_scale2(8); op_scale2(9); op_scale2(10); op_scale2(11); op_scale2(12); op_scale2(13); op_scale2(14); op_scale2(15); } } #undef op_scale2 #define PIXEL_AVG_C( name, width, height ) \ static void name( uint8_t *pix1, int i_stride_pix1, \ uint8_t *pix2, int i_stride_pix2, \ uint8_t *pix3, int i_stride_pix3, int weight ) \ { \ if( weight == 32 )\ pixel_avg_wxh( pix1, i_stride_pix1, pix2, i_stride_pix2, pix3, i_stride_pix3, width, height ); \ else\ pixel_avg_weight_wxh( pix1, 
i_stride_pix1, pix2, i_stride_pix2, pix3, i_stride_pix3, width, height, weight ); \ } PIXEL_AVG_C( pixel_avg_16x16, 16, 16 ) PIXEL_AVG_C( pixel_avg_16x8, 16, 8 ) PIXEL_AVG_C( pixel_avg_8x16, 8, 16 ) PIXEL_AVG_C( pixel_avg_8x8, 8, 8 ) PIXEL_AVG_C( pixel_avg_8x4, 8, 4 ) PIXEL_AVG_C( pixel_avg_4x8, 4, 8 ) PIXEL_AVG_C( pixel_avg_4x4, 4, 4 ) PIXEL_AVG_C( pixel_avg_4x2, 4, 2 ) PIXEL_AVG_C( pixel_avg_2x4, 2, 4 ) PIXEL_AVG_C( pixel_avg_2x2, 2, 2 ) static void mc_copy( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height ) { int y; for( y = 0; y < i_height; y++ ) { memcpy( dst, src, i_width ); src += i_src_stride; dst += i_dst_stride; } } #define TAPFILTER(pix, d) ((pix)[x-2*d] + (pix)[x+3*d] - 5*((pix)[x-d] + (pix)[x+2*d]) + 20*((pix)[x] + (pix)[x+d])) static void hpel_filter( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, int stride, int width, int height ) { int16_t *buf = x264_malloc((width+5)*sizeof(int16_t)); int x, y; #pragma omp parallel for for( y=0; y<height; y++ ) { for( x=-2; x<width+3; x++ ) { int v = TAPFILTER(src,stride); dstv[x] = x264_clip_uint8((v + 16) >> 5); buf[x+2] = v; } for( x=0; x<width; x++ ) dstc[x] = x264_clip_uint8((TAPFILTER(buf+2,1) + 512) >> 10); for( x=0; x<width; x++ ) dsth[x] = x264_clip_uint8((TAPFILTER(src,1) + 16) >> 5); dsth += stride; dstv += stride; dstc += stride; src += stride; } x264_free(buf); } static const int hpel_ref0[16] = {0,1,1,1,0,1,1,1,2,3,3,3,0,1,1,1}; static const int hpel_ref1[16] = {0,0,0,0,2,2,3,2,2,2,3,2,2,2,3,2}; static void mc_luma( uint8_t *dst, int i_dst_stride, uint8_t *src[4], int i_src_stride, int mvx, int mvy, int i_width, int i_height ) { int qpel_idx = ((mvy&3)<<2) + (mvx&3); int offset = (mvy>>2)*i_src_stride + (mvx>>2); uint8_t *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride; if( qpel_idx & 5 ) /* qpel interpolation needed */ { uint8_t *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3); pixel_avg( dst, i_dst_stride, src1, 
i_src_stride, src2, i_src_stride, i_width, i_height ); } else { mc_copy( src1, i_src_stride, dst, i_dst_stride, i_width, i_height ); } } static uint8_t *get_ref( uint8_t *dst, int *i_dst_stride, uint8_t *src[4], int i_src_stride, int mvx, int mvy, int i_width, int i_height ) { int qpel_idx = ((mvy&3)<<2) + (mvx&3); int offset = (mvy>>2)*i_src_stride + (mvx>>2); uint8_t *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride; if( qpel_idx & 5 ) /* qpel interpolation needed */ { uint8_t *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3); pixel_avg( dst, *i_dst_stride, src1, i_src_stride, src2, i_src_stride, i_width, i_height ); return dst; } else { *i_dst_stride = i_src_stride; return src1; } } /* full chroma mc (ie until 1/8 pixel)*/ static void mc_chroma( uint8_t *dst, int i_dst_stride, uint8_t *src, int i_src_stride, int mvx, int mvy, int i_width, int i_height ) { uint8_t *srcp; int x, y; const int d8x = mvx&0x07; const int d8y = mvy&0x07; const int cA = (8-d8x)*(8-d8y); const int cB = d8x *(8-d8y); const int cC = (8-d8x)*d8y; const int cD = d8x *d8y; src += (mvy >> 3) * i_src_stride + (mvx >> 3); srcp = &src[i_src_stride]; for( y = 0; y < i_height; y++ ) { for( x = 0; x < i_width; x++ ) { dst[x] = ( cA*src[x] + cB*src[x+1] + cC*srcp[x] + cD*srcp[x+1] + 32 ) >> 6; } dst += i_dst_stride; src = srcp; srcp += i_src_stride; } } #define MC_COPY(W) \ static void mc_copy_w##W( uint8_t *dst, int i_dst, uint8_t *src, int i_src, int i_height ) \ { \ mc_copy( src, i_src, dst, i_dst, W, i_height ); \ } MC_COPY( 16 ) MC_COPY( 8 ) MC_COPY( 4 ) static void plane_copy( uint8_t *dst, int i_dst, uint8_t *src, int i_src, int w, int h) { while( h-- ) { memcpy( dst, src, w ); dst += i_dst; src += i_src; } } static void prefetch_fenc_null( uint8_t *pix_y, int stride_y, uint8_t *pix_uv, int stride_uv, int mb_x ) {} static void prefetch_ref_null( uint8_t *pix, int stride, int parity ) {} static void memzero_aligned( void * dst, int n ) { memset( dst, 0, n ); } 
void x264_frame_init_lowres( x264_t *h, x264_frame_t *frame ) { uint8_t *src = frame->plane[0]; int i_stride = frame->i_stride[0]; int i_height = frame->i_lines[0]; int i_width = frame->i_width[0]; int x, y; // duplicate last row and column so that their interpolation doesn't have to be special-cased for( y=0; y<i_height; y++ ) src[i_width+y*i_stride] = src[i_width-1+y*i_stride]; h->mc.memcpy_aligned( src+i_stride*i_height, src+i_stride*(i_height-1), i_width ); h->mc.frame_init_lowres_core( src, frame->lowres[0], frame->lowres[1], frame->lowres[2], frame->lowres[3], i_stride, frame->i_stride_lowres, frame->i_width_lowres, frame->i_lines_lowres ); x264_frame_expand_border_lowres( frame ); memset( frame->i_cost_est, -1, sizeof(frame->i_cost_est) ); for( x = 0; x < h->param.i_bframe + 2; x++ ) for( y = 0; y < h->param.i_bframe + 2; y++ ) frame->i_row_satds[y][x][0] = -1; for( y = 0; y <= !!h->param.i_bframe; y++ ) for( x = 0; x <= h->param.i_bframe; x++ ) frame->lowres_mvs[y][x][0][0] = 0x7FFF; } static void frame_init_lowres_core( uint8_t *src0, uint8_t *dst0, uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, int src_stride, int dst_stride, int width, int height ) { int x,y; for( y=0; y<height; y++ ) { uint8_t *src1 = src0+src_stride; uint8_t *src2 = src1+src_stride; for( x=0; x<width; x++ ) { // slower than naive bilinear, but matches asm #define FILTER(a,b,c,d) ((((a+b+1)>>1)+((c+d+1)>>1)+1)>>1) dst0[x] = FILTER(src0[2*x ], src1[2*x ], src0[2*x+1], src1[2*x+1]); dsth[x] = FILTER(src0[2*x+1], src1[2*x+1], src0[2*x+2], src1[2*x+2]); dstv[x] = FILTER(src1[2*x ], src2[2*x ], src1[2*x+1], src2[2*x+1]); dstc[x] = FILTER(src1[2*x+1], src2[2*x+1], src1[2*x+2], src2[2*x+2]); #undef FILTER } src0 += src_stride*2; dst0 += dst_stride; dsth += dst_stride; dstv += dst_stride; dstc += dst_stride; } } void x264_mc_init( int cpu, x264_mc_functions_t *pf ) { pf->mc_luma = mc_luma; pf->get_ref = get_ref; pf->mc_chroma = mc_chroma; pf->avg[PIXEL_16x16]= pixel_avg_16x16; pf->avg[PIXEL_16x8] 
= pixel_avg_16x8; pf->avg[PIXEL_8x16] = pixel_avg_8x16; pf->avg[PIXEL_8x8] = pixel_avg_8x8; pf->avg[PIXEL_8x4] = pixel_avg_8x4; pf->avg[PIXEL_4x8] = pixel_avg_4x8; pf->avg[PIXEL_4x4] = pixel_avg_4x4; pf->avg[PIXEL_4x2] = pixel_avg_4x2; pf->avg[PIXEL_2x4] = pixel_avg_2x4; pf->avg[PIXEL_2x2] = pixel_avg_2x2; pf->copy_16x16_unaligned = mc_copy_w16; pf->copy[PIXEL_16x16] = mc_copy_w16; pf->copy[PIXEL_8x8] = mc_copy_w8; pf->copy[PIXEL_4x4] = mc_copy_w4; pf->plane_copy = plane_copy; pf->hpel_filter = hpel_filter; pf->prefetch_fenc = prefetch_fenc_null; pf->prefetch_ref = prefetch_ref_null; pf->memcpy_aligned = memcpy; pf->memzero_aligned = memzero_aligned; pf->frame_init_lowres_core = frame_init_lowres_core; #ifdef HAVE_MMX x264_mc_init_mmx( cpu, pf ); #endif #ifdef ARCH_PPC if( cpu&X264_CPU_ALTIVEC ) x264_mc_altivec_init( pf ); #endif } void x264_frame_filter( x264_t *h, x264_frame_t *frame, int mb_y, int b_end ) { const int b_interlaced = h->sh.b_mbaff; const int stride = frame->i_stride[0] << b_interlaced; const int width = frame->i_width[0]; int start = (mb_y*16 >> b_interlaced) - 8; // buffer = 4 for deblock + 3 for 6tap, rounded to 8 int height = ((b_end ? frame->i_lines[0] : mb_y*16) >> b_interlaced) + 8; int offs = start*stride - 8; // buffer = 3 for 6tap, aligned to 8 for simd int x, y; if( mb_y & b_interlaced ) return; for( y=0; y<=b_interlaced; y++, offs+=frame->i_stride[0] ) { h->mc.hpel_filter( frame->filtered[1] + offs, frame->filtered[2] + offs, frame->filtered[3] + offs, frame->plane[0] + offs, stride, width + 16, height - start ); } /* generate integral image: * frame->integral contains 2 planes. in the upper plane, each element is * the sum of an 8x8 pixel region with top-left corner on that point. * in the lower plane, 4x4 sums (needed only with --partitions p4x4). 
*/ if( frame->integral ) { if( start < 0 ) { memset( frame->integral - PADV * stride - PADH, 0, stride * sizeof(uint16_t) ); start = -PADV; } if( b_end ) height += PADV-8; #pragma omp for for( y = start; y < height; y++ ) { uint8_t *ref = frame->plane[0] + y * stride - PADH; uint16_t *line = frame->integral + (y+1) * stride - PADH + 1; uint16_t v = line[0] = 0; for( x = 1; x < stride-1; x++ ) line[x] = v += ref[x] + line[x-stride] - line[x-stride-1]; line -= 8*stride; if( y >= 9-PADV ) { uint16_t *sum4 = line + stride * (frame->i_lines[0] + PADV*2); for( x = 1; x < stride-8; x++, line++, sum4++ ) { sum4[0] = line[4+4*stride] - line[4] - line[4*stride] + line[0]; line[0] += line[8+8*stride] - line[8] - line[8*stride]; } } } } }
/* ===== file: bfsdfs.h ===== */
#include <vector> namespace TSnap { ///////////////////////////////////////////////// // BFS and DFS /// Returns a directed Breadth-First-Search tree rooted at StartNId. ##GetBfsTree1 template <class PGraph> PNGraph GetBfsTree(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn); /// Returns the BFS tree size (number of nodes) and depth (number of levels) by following in-links (parameter FollowIn = true) and/or out-links (parameter FollowOut = true) of node StartNId. template <class PGraph> int GetSubTreeSz(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn, int& TreeSzX, int& TreeDepthX); /// Finds IDs of all nodes that are at distance Hop from node StartNId. ##GetSubTreeSz template <class PGraph> int GetNodesAtHop(const PGraph& Graph, const int& StartNId, const int& Hop, TIntV& NIdV, const bool& IsDir=false); /// Returns the number of nodes at each hop distance from the starting node StartNId. ##GetNodesAtHops template <class PGraph> int GetNodesAtHops(const PGraph& Graph, const int& StartNId, TIntPrV& HopCntV, const bool& IsDir=false); ///////////////////////////////////////////////// // Shortest paths /// Returns the length of the shortest path from node SrcNId to node DstNId. ##GetShortPath1 template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, const int& DstNId, const bool& IsDir=false); template <class PGraph> std::vector< int64_t> GetShortPath(const PGraph& Graph, std::vector<std::pair< int64_t, int64_t>> &pairs); /// Returns the length of the shortest path from node SrcNId to all other nodes in the network. 
##GetShortPath2 template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, TIntH& NIdToDistH, const bool& IsDir=false, const int& MaxDist=TInt::Mx); ///////////////////////////////////////////////// // Diameter /// Returns the (approximation of the) Diameter (maximum shortest path length) of a graph (by performing BFS from NTestNodes random starting nodes). ##GetBfsFullDiam template <class PGraph> int64 GetBfsFullDiam_stl(const PGraph& Graph, const int& NTestNodes, const bool& IsDir=false); template <class PGraph> int GetBfsFullDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir=false); /// Returns the (approximation of the) Effective Diameter (90-th percentile of the distribution of shortest path lengths) of a graph (by performing BFS from NTestNodes random starting nodes). ##GetBfsEffDiam1 template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir=false); /// Returns the (approximation of the) Effective Diameter and the Diameter of a graph (by performing BFS from NTestNodes random starting nodes). ##GetBfsEffDiam2 template <class PGraph> double GetBfsEffDiam_stl(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiamX, int& FullDiamX); template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiamX, int& FullDiamX); /// Returns the (approximation of the) Effective Diameter, the Diameter and the Average Shortest Path length in a graph (by performing BFS from NTestNodes random starting nodes). 
GetBfsEffDiam3 template <class PGraph> double GetBfsEffDiam_stl(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiamX, int& FullDiamX, double& AvgSPLX); template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiamX, int& FullDiamX, double& AvgSPLX); /// Use the whole graph (all edges) to measure the shortest path lengths but only report the path lengths between nodes in the SubGraphNIdV. GetBfsEffDiam4 template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const TIntV& SubGraphNIdV, const bool& IsDir, double& EffDiamX, int& FullDiamX); // TODO: Implement in the future //template <class PGraph> int GetRangeDist(const PGraph& Graph, const int& SrcNId, const int& DstNId, const bool& IsDir=false); //template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, TIntH& NIdToDistH, const bool& IsDir=false, const int& MaxDist=1000); //template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, const TIntSet& TargetSet, const bool& IsDir, TIntV& PathNIdV); //template <class PGraph> int GetShortPath(TIntH& NIdPrnH, TCcQueue<int>& NIdQ, const PGraph& Graph, const int& SrcNId, const TIntSet& TargetSet, const bool& IsDir, TIntV& PathNIdV); //template <class PGraph> int GetMxShortDist(const PGraph& Graph, const int& SrcNId, const bool& IsDir=false); //template <class PGraph> int GetMxShortDist(const PGraph& Graph, const int& SrcNId, const bool& IsDir, int& MxDistNId); //template <class PGraph> int GetMxShortDist(const PGraph& Graph, const int& SrcNId, const bool& IsDir, int& MxDistNId, TCcQueue<int>& NIdQ, TCcQueue<int>& DistQ, TIntSet& VisitedH); //template <class PGraph> int GetMxGreedyDist(const PGraph& Graph, const int& SrcNId, const bool& IsDir=false); //template <class PGraph> int GetMxGreedyDist(const PGraph& Graph, const int& SrcNId, const bool& IsDir, TCcQueue<int>& NIdQ, TCcQueue<int>& DistQ, TIntSet& 
VisitedH); //template <class PGraph> PNGraph GetShortPathsSubGraph(const PGraph& Graph, const TIntV& SubGraphNIdV); //template <class PGraph> PGraph GetWccPathsSubGraph(const PGraph& Graph, const TIntV& NIdV); //template <class PGraph> void GetSubTreeSz(const PGraph& Graph, const int& StartNId, const bool& FollowOutEdges, int& TreeSz, int& TreeDepth); } // namespace TSnap //#////////////////////////////////////////////// /// Breath-First-Search class. /// The class is meant for executing many BFSs over a fixed graph. This means that the class can keep the hash tables and queues initialized between different calls of the DoBfs() function. template<class PGraph> class TBreathFS { public: PGraph Graph; TSnapQueue<int> Queue; TInt StartNId; TIntH NIdDistH; public: TBreathFS(const PGraph& GraphPt, const bool& InitBigQ=true) : Graph(GraphPt), Queue(InitBigQ?Graph->GetNodes():1024), NIdDistH(InitBigQ?Graph->GetNodes():1024) { } /// Sets the graph to be used by the BFS to GraphPt and resets the data structures. void SetGraph(const PGraph& GraphPt); /// Performs BFS from node id StartNode for at maps MxDist steps by only following in-links (parameter FollowIn = true) and/or out-links (parameter FollowOut = true). int DoBfs(const int& StartNode, const bool& FollowOut, const bool& FollowIn, const int& TargetNId=-1, const int& MxDist=TInt::Mx); /// Same functionality as DoBfs with better performance. int DoBfsHybrid(const int& StartNode, const bool& FollowOut, const bool& FollowIn, const int& TargetNId=-1, const int& MxDist=TInt::Mx); /// Returns the number of nodes visited/reached by the BFS. int GetNVisited() const { return NIdDistH.Len(); } /// Returns the IDs of the nodes visited/reached by the BFS. void GetVisitedNIdV(TIntV& NIdV) const { NIdDistH.GetKeyV(NIdV); } /// Returns the shortst path distance between SrcNId and DistNId. /// Note you have to first call DoBFs(). SrcNId must be equal to StartNode, otherwise return value is -1. 
int GetHops(const int& SrcNId, const int& DstNId) const; /// Returns a random shortest path from SrcNId to DstNId. /// Note you have to first call DoBFs(). SrcNId must be equal to StartNode, otherwise return value is -1. int GetRndPath(const int& SrcNId, const int& DstNId, TIntV& PathNIdV) const; /* Private variables and functions for DoBfsHybrid */ private: int Stage; // 0, 2: top down, 1: bottom up static const unsigned int alpha = 100; static const unsigned int beta = 20; /* Private functions */ bool TopDownStep(TIntV &NIdDistV, TIntV *Frontier, TIntV *NextFrontier, int& MaxDist, const int& TargetNId, const bool& FollowOut, const bool& FollowIn); bool BottomUpStep(TIntV &NIdDistV, TIntV *Frontier, TIntV *NextFrontier, int& MaxDist, const int& TargetNId, const bool& FollowOut, const bool& FollowIn); }; template<class PGraph> void TBreathFS<PGraph>::SetGraph(const PGraph& GraphPt) { Graph=GraphPt; const int N=GraphPt->GetNodes(); if (Queue.Reserved() < N) { Queue.Gen(N); } if (NIdDistH.GetReservedKeyIds() < N) { NIdDistH.Gen(N); } } template<class PGraph> int TBreathFS<PGraph>::DoBfs(const int& StartNode, const bool& FollowOut, const bool& FollowIn, const int& TargetNId, const int& MxDist) { StartNId = StartNode; IAssert(Graph->IsNode(StartNId)); // const typename PGraph::TObj::TNodeI StartNodeI = Graph->GetNI(StartNode); // IAssertR(StartNodeI.GetOutDeg() > 0, TStr::Fmt("No neighbors from start node %d.", StartNode)); NIdDistH.Clr(false); NIdDistH.AddDat(StartNId, 0); Queue.Clr(false); Queue.Push(StartNId); int v, MaxDist = 0; while (! Queue.Empty()) { const int NId = Queue.Top(); Queue.Pop(); const int Dist = NIdDistH.GetDat(NId); if (Dist == MxDist) { break; } // max distance limit reached const typename PGraph::TObj::TNodeI NodeI = Graph->GetNI(NId); if (FollowOut) { // out-links for (v = 0; v < NodeI.GetOutDeg(); v++) { // out-links const int DstNId = NodeI.GetOutNId(v); if (! 
NIdDistH.IsKey(DstNId)) { NIdDistH.AddDat(DstNId, Dist+1); MaxDist = TMath::Mx(MaxDist, Dist+1); if (DstNId == TargetNId) { return MaxDist; } Queue.Push(DstNId); } } } if (FollowIn) { // in-links for (v = 0; v < NodeI.GetInDeg(); v++) { const int DstNId = NodeI.GetInNId(v); if (! NIdDistH.IsKey(DstNId)) { NIdDistH.AddDat(DstNId, Dist+1); MaxDist = TMath::Mx(MaxDist, Dist+1); if (DstNId == TargetNId) { return MaxDist; } Queue.Push(DstNId); } } } } return MaxDist; } template<class PGraph> int TBreathFS<PGraph>::DoBfsHybrid(const int& StartNode, const bool& FollowOut, const bool& FollowIn, const int& TargetNId, const int& MxDist) { StartNId = StartNode; IAssert(Graph->IsNode(StartNId)); if (TargetNId == StartNode) return 0; const typename PGraph::TObj::TNodeI StartNodeI = Graph->GetNI(StartNode); // Initialize vector TIntV NIdDistV(Graph->GetMxNId() + 1); for (int i = 0; i < NIdDistV.Len(); i++) { NIdDistV.SetVal(i, -1); } TIntV *Frontier = new TIntV(Graph->GetNodes(), 0); TIntV *NextFrontier = new TIntV(Graph->GetNodes(), 0); NIdDistV.SetVal(StartNId, 0); Frontier->Add(StartNId); Stage = 0; int MaxDist = -1; const unsigned int TotalNodes = Graph->GetNodes(); unsigned int UnvisitedNodes = Graph->GetNodes(); while (! 
Frontier->Empty()) { MaxDist += 1; NextFrontier->Clr(false); if (MaxDist == MxDist) { break; } // max distance limit reached UnvisitedNodes -= Frontier->Len(); if (Stage == 0 && UnvisitedNodes / Frontier->Len() < alpha) { Stage = 1; } else if (Stage == 1 && TotalNodes / Frontier->Len() > beta) { Stage = 2; } // Top down or bottom up depending on stage bool targetFound = false; if (Stage == 0 || Stage == 2) { targetFound = TopDownStep(NIdDistV, Frontier, NextFrontier, MaxDist, TargetNId, FollowOut, FollowIn); } else { targetFound = BottomUpStep(NIdDistV, Frontier, NextFrontier, MaxDist, TargetNId, FollowOut, FollowIn); } if (targetFound) { MaxDist = NIdDistV[TargetNId]; break; } // swap Frontier and NextFrontier TIntV *temp = Frontier; Frontier = NextFrontier; NextFrontier = temp; } delete Frontier; delete NextFrontier; // Transform vector to hash table NIdDistH.Clr(false); for (int NId = 0; NId < NIdDistV.Len(); NId++) { if (NIdDistV[NId] != -1) { NIdDistH.AddDat(NId, NIdDistV[NId]); } } return MaxDist; } template<class PGraph> bool TBreathFS<PGraph>::TopDownStep(TIntV &NIdDistV, TIntV *Frontier, TIntV *NextFrontier, int& MaxDist, const int& TargetNId, const bool& FollowOut, const bool& FollowIn) { for (TIntV::TIter it = Frontier->BegI(); it != Frontier->EndI(); ++it) { // loop over frontier const int NId = *it; const int Dist = NIdDistV[NId]; IAssert(Dist == MaxDist); // Must equal to MaxDist const typename PGraph::TObj::TNodeI NodeI = Graph->GetNI(NId); if (FollowOut) { for (int v = 0; v < NodeI.GetOutDeg(); v++) { const int NeighborNId = NodeI.GetOutNId(v); if (NIdDistV[NeighborNId] == -1) { NIdDistV.SetVal(NeighborNId, Dist+1); if (NeighborNId == TargetNId) return true; NextFrontier->Add(NeighborNId); } } } if (FollowIn) { for (int v = 0; v < NodeI.GetInDeg(); v++) { const int NeighborNId = NodeI.GetInNId(v); if (NIdDistV[NeighborNId] == -1) { NIdDistV.SetVal(NeighborNId, Dist+1); if (NeighborNId == TargetNId) return true; NextFrontier->Add(NeighborNId); } } } 
} return false; } template<class PGraph> bool TBreathFS<PGraph>::BottomUpStep(TIntV &NIdDistV, TIntV *Frontier, TIntV *NextFrontier, int& MaxDist, const int& TargetNId, const bool& FollowOut, const bool& FollowIn) { for (typename PGraph::TObj::TNodeI NodeI = Graph->BegNI(); NodeI < Graph->EndNI(); NodeI++) { const int NId = NodeI.GetId(); if (NIdDistV[NId] == -1) { if (FollowOut) { for (int v = 0; v < NodeI.GetInDeg(); v++) { const int ParentNId = NodeI.GetInNId(v); if (NIdDistV[ParentNId] == MaxDist) { NIdDistV[NId] = MaxDist + 1; if (NId == TargetNId) return true; NextFrontier->Add(NId); break; } } } if (FollowIn && NIdDistV[NId] == -1) { for (int v = 0; v < NodeI.GetOutDeg(); v++) { const int ParentNId = NodeI.GetOutNId(v); if (NIdDistV[ParentNId] == MaxDist) { NIdDistV[NId] = MaxDist + 1; if (NId == TargetNId) return true; NextFrontier->Add(NId); break; } } } } } return false; } template<class PGraph> int TBreathFS<PGraph>::GetHops(const int& SrcNId, const int& DstNId) const { TInt Dist; if (SrcNId!=StartNId) { return -1; } if (! NIdDistH.IsKeyGetDat(DstNId, Dist)) { return -1; } return Dist.Val; } template<class PGraph> int TBreathFS<PGraph>::GetRndPath(const int& SrcNId, const int& DstNId, TIntV& PathNIdV) const { PathNIdV.Clr(false); if (SrcNId!=StartNId || ! NIdDistH.IsKey(DstNId)) { return -1; } PathNIdV.Add(DstNId); TIntV CloserNIdV; int CurNId = DstNId; TInt CurDist, NextDist; while (CurNId != SrcNId) { typename PGraph::TObj::TNodeI NI = Graph->GetNI(CurNId); IAssert(NIdDistH.IsKeyGetDat(CurNId, CurDist)); CloserNIdV.Clr(false); for (int e = 0; e < NI.GetDeg(); e++) { const int Next = NI.GetNbrNId(e); if (NIdDistH.IsKeyGetDat(Next, NextDist)) { if (NextDist == CurDist-1) { CloserNIdV.Add(Next); } } } IAssert(! 
CloserNIdV.Empty()); CurNId = CloserNIdV[TInt::Rnd.GetUniDevInt(CloserNIdV.Len())]; PathNIdV.Add(CurNId); } PathNIdV.Reverse(); return PathNIdV.Len()-1; } ///////////////////////////////////////////////// // Implementation namespace TSnap { template <class PGraph> PNGraph GetBfsTree(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn) { TBreathFS<PGraph> BFS(Graph); BFS.DoBfs(StartNId, FollowOut, FollowIn, -1, TInt::Mx); PNGraph Tree = TNGraph::New(); BFS.NIdDistH.SortByDat(); for (int i = 0; i < BFS.NIdDistH.Len(); i++) { const int NId = BFS.NIdDistH.GetKey(i); const int Dist = BFS.NIdDistH[i]; typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId); if (!Tree->IsNode(NId)) { Tree->AddNode(NId); } if (FollowOut) { for (int e = 0; e < NI.GetInDeg(); e++) { const int Prev = NI.GetInNId(e); if (Tree->IsNode(Prev) && BFS.NIdDistH.GetDat(Prev)==Dist-1) { Tree->AddEdge(Prev, NId); } } } if (FollowIn) { for (int e = 0; e < NI.GetOutDeg(); e++) { const int Prev = NI.GetOutNId(e); if (Tree->IsNode(Prev) && BFS.NIdDistH.GetDat(Prev)==Dist-1) { Tree->AddEdge(Prev, NId); } } } } return Tree; } template <class PGraph> int GetSubTreeSz(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn, int& TreeSz, int& TreeDepth) { TBreathFS<PGraph> BFS(Graph); BFS.DoBfs(StartNId, FollowOut, FollowIn, -1, TInt::Mx); TreeSz = BFS.NIdDistH.Len(); TreeDepth = 0; for (int i = 0; i < BFS.NIdDistH.Len(); i++) { TreeDepth = TMath::Mx(TreeDepth, BFS.NIdDistH[i].Val); } return TreeSz; } template <class PGraph> int GetNodesAtHop(const PGraph& Graph, const int& StartNId, const int& Hop, TIntV& NIdV, const bool& IsDir) { TBreathFS<PGraph> BFS(Graph); BFS.DoBfs(StartNId, true, !IsDir, -1, Hop); NIdV.Clr(false); for (int i = 0; i < BFS.NIdDistH.Len(); i++) { if (BFS.NIdDistH[i] == Hop) { NIdV.Add(BFS.NIdDistH.GetKey(i)); } } return NIdV.Len(); } template <class PGraph> int GetNodesAtHops(const PGraph& Graph, const int& StartNId, TIntPrV& 
HopCntV, const bool& IsDir) { TBreathFS<PGraph> BFS(Graph); BFS.DoBfs(StartNId, true, !IsDir, -1, TInt::Mx); TIntH HopCntH; for (int i = 0; i < BFS.NIdDistH.Len(); i++) { HopCntH.AddDat(BFS.NIdDistH[i]) += 1; } HopCntH.GetKeyDatPrV(HopCntV); HopCntV.Sort(); return HopCntV.Len(); } template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, TIntH& NIdToDistH, const bool& IsDir, const int& MaxDist) { TBreathFS<PGraph> BFS(Graph); BFS.DoBfs(SrcNId, true, ! IsDir, -1, MaxDist); NIdToDistH.Clr(); NIdToDistH.Swap(BFS.NIdDistH); return NIdToDistH[NIdToDistH.Len()-1]; } template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, const int& DstNId, const bool& IsDir) { TBreathFS<PGraph> BFS(Graph); BFS.DoBfs(SrcNId, true, ! IsDir, DstNId, TInt::Mx); return BFS.GetHops(SrcNId, DstNId); } template <class PGraph> std::vector< int64_t> GetShortPath(const PGraph& Graph, std::vector<std::pair< int64_t, int64_t>> &pairs) { std::vector< int64_t> output; for(auto p : pairs) { int64_t len = GetShortPath(Graph, p.first, p.second); output.push_back(len); } return output; } template <class PGraph> int64 GetBfsFullDiam_stl(const PGraph& Graph, const int& NTestNodes, const bool& IsDir) { int FullDiam; double EffDiam; GetBfsEffDiam_stl(Graph, NTestNodes, IsDir, EffDiam, FullDiam); return FullDiam; } template <class PGraph> int GetBfsFullDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir) { int FullDiam; double EffDiam; GetBfsEffDiam(Graph, NTestNodes, IsDir, EffDiam, FullDiam); return FullDiam; } template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir) { int FullDiam; double EffDiam; GetBfsEffDiam(Graph, NTestNodes, IsDir, EffDiam, FullDiam); return EffDiam; } template <class PGraph> double GetBfsEffDiam_stl(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiam, int& FullDiam) { double AvgDiam; EffDiam = -1; FullDiam = -1; return GetBfsEffDiam_stl(Graph, 
NTestNodes, IsDir, EffDiam, FullDiam, AvgDiam); } template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiam, int& FullDiam) { double AvgDiam; EffDiam = -1; FullDiam = -1; return GetBfsEffDiam(Graph, NTestNodes, IsDir, EffDiam, FullDiam, AvgDiam); } template <class PGraph> double GetBfsEffDiam_stl(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiam, int& FullDiam, double& AvgSPL) { EffDiam = -1; FullDiam = -1; AvgSPL = -1; TIntFltH DistToCntH; TBreathFS<PGraph> BFS(Graph); int64_t minnodes = min(( int64_t)NTestNodes, Graph->GetNodes()); for (int tries = 0; tries < minnodes; tries++) { const int64_t NId = tries; //TODO BFS.DoBfs(NId, true, ! IsDir, -1, TInt::Mx); for (int i = 0; i < BFS.NIdDistH.Len(); i++) { DistToCntH.AddDat(BFS.NIdDistH[i]) += 1; } } TIntFltKdV DistNbrsPdfV; double SumPathL=0, PathCnt=0; for (int i = 0; i < DistToCntH.Len(); i++) { DistNbrsPdfV.Add(TIntFltKd(DistToCntH.GetKey(i), DistToCntH[i])); SumPathL += DistToCntH.GetKey(i) * DistToCntH[i]; PathCnt += DistToCntH[i]; } DistNbrsPdfV.Sort(); EffDiam = TSnap::TSnapDetail::CalcEffDiamPdf(DistNbrsPdfV, 0.9); // effective diameter (90-th percentile) FullDiam = DistNbrsPdfV.Last().Key; // approximate full diameter (max shortest path length over the sampled nodes) AvgSPL = SumPathL/PathCnt; // average shortest path length return EffDiam; } template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiam, int& FullDiam, double& AvgSPL) { EffDiam = -1; FullDiam = -1; AvgSPL = -1; TIntFltH DistToCntH; TBreathFS<PGraph> BFS(Graph); // shotest paths TIntV NodeIdV; Graph->GetNIdV(NodeIdV); NodeIdV.Shuffle(TInt::Rnd); for (int tries = 0; tries < TMath::Mn(NTestNodes, Graph->GetNodes()); tries++) { const int NId = NodeIdV[tries]; BFS.DoBfs(NId, true, ! 
IsDir, -1, TInt::Mx); for (int i = 0; i < BFS.NIdDistH.Len(); i++) { DistToCntH.AddDat(BFS.NIdDistH[i]) += 1; } } TIntFltKdV DistNbrsPdfV; double SumPathL=0, PathCnt=0; for (int i = 0; i < DistToCntH.Len(); i++) { DistNbrsPdfV.Add(TIntFltKd(DistToCntH.GetKey(i), DistToCntH[i])); SumPathL += DistToCntH.GetKey(i) * DistToCntH[i]; PathCnt += DistToCntH[i]; } DistNbrsPdfV.Sort(); EffDiam = TSnap::TSnapDetail::CalcEffDiamPdf(DistNbrsPdfV, 0.9); // effective diameter (90-th percentile) FullDiam = DistNbrsPdfV.Last().Key; // approximate full diameter (max shortest path length over the sampled nodes) AvgSPL = SumPathL/PathCnt; // average shortest path length return EffDiam; } template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const TIntV& SubGraphNIdV, const bool& IsDir, double& EffDiam, int& FullDiam) { EffDiam = -1; FullDiam = -1; TIntFltH DistToCntH; TBreathFS<PGraph> BFS(Graph); // shotest paths TIntV NodeIdV(SubGraphNIdV); NodeIdV.Shuffle(TInt::Rnd); TInt Dist; for (int tries = 0; tries < TMath::Mn(NTestNodes, SubGraphNIdV.Len()); tries++) { const int NId = NodeIdV[tries]; BFS.DoBfs(NId, true, ! 
IsDir, -1, TInt::Mx); for (int i = 0; i < SubGraphNIdV.Len(); i++) { if (BFS.NIdDistH.IsKeyGetDat(SubGraphNIdV[i], Dist)) { DistToCntH.AddDat(Dist) += 1; } } } TIntFltKdV DistNbrsPdfV; for (int i = 0; i < DistToCntH.Len(); i++) { DistNbrsPdfV.Add(TIntFltKd(DistToCntH.GetKey(i), DistToCntH[i])); } DistNbrsPdfV.Sort(); EffDiam = TSnap::TSnapDetail::CalcEffDiamPdf(DistNbrsPdfV, 0.9); // effective diameter (90-th percentile) FullDiam = DistNbrsPdfV.Last().Key; // approximate full diameter (max shortest path length over the sampled nodes) return EffDiam; // average shortest path length } template <class PGraph> int GetShortestDistances(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn, TIntV& ShortestDists) { PSOut StdOut = TStdOut::New(); int MxNId = Graph->GetMxNId(); int NonNodeDepth = 2147483647; // INT_MAX int InfDepth = 2147483646; // INT_MAX - 1 ShortestDists.Gen(MxNId); for (int NId = 0; NId < MxNId; NId++) { if (Graph->IsNode(NId)) { ShortestDists[NId] = InfDepth; } else { ShortestDists[NId] = NonNodeDepth; } } TIntV Vec1(MxNId, 0); // ensure enough capacity TIntV Vec2(MxNId, 0); // ensure enough capacity ShortestDists[StartNId] = 0; TIntV* PCurV = &Vec1; PCurV->Add(StartNId); TIntV* PNextV = &Vec2; int Depth = 0; // current depth while (!PCurV->Empty()) { Depth++; // increase depth for (int i = 0; i < PCurV->Len(); i++) { int NId = PCurV->GetVal(i); typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId); for (int e = 0; e < NI.GetOutDeg(); e++) { const int OutNId = NI.GetOutNId(e); if (ShortestDists[OutNId].Val == InfDepth) { ShortestDists[OutNId] = Depth; PNextV->Add(OutNId); } } } // swap pointer, no copying TIntV* Tmp = PCurV; PCurV = PNextV; PNextV = Tmp; // clear next PNextV->Reduce(0); // reduce length, does not initialize new array } return Depth-1; } #ifdef USE_OPENMP template <class PGraph> int GetShortestDistancesMP2(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn, TIntV& 
ShortestDists) { int MxNId = Graph->GetMxNId(); int NonNodeDepth = 2147483647; // INT_MAX int InfDepth = 2147483646; // INT_MAX - 1 ShortestDists.Gen(MxNId); #pragma omp parallel for schedule(dynamic,10000) for (int NId = 0; NId < MxNId; NId++) { if (Graph->IsNode(NId)) { ShortestDists[NId] = InfDepth; } else { ShortestDists[NId] = NonNodeDepth; } } TIntV Vec1(MxNId, 0); // ensure enough capacity TIntV Vec2(MxNId, 0); // ensure enough capacity ShortestDists[StartNId] = 0; TIntV* PCurV = &Vec1; PCurV->Add(StartNId); TIntV* PNextV = &Vec2; int Depth = 0; // current depth while (!PCurV->Empty()) { Depth++; // increase depth #pragma omp parallel for schedule(dynamic,10000) for (int i = 0; i < PCurV->Len(); i++) { int NId = PCurV->GetVal(i); typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId); for (int e = 0; e < NI.GetOutDeg(); e++) { const int OutNId = NI.GetOutNId(e); if (__sync_bool_compare_and_swap(&(ShortestDists[OutNId].Val), InfDepth, Depth)) { PNextV->AddMP(OutNId); } } } // #pragma omp parallel for schedule(dynamic,10000) // for (int NId = 0; NId < MxNId; NId++) { // if (ShortestDists[NId] == InfDepth) { // typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId); // for (int e = 0; e < NI.GetInDeg(); e++) { // const int InNId = NI.GetInNId(e); // if (ShortestDists[InNId] < Depth) { // ShortestDists[NId] = Depth; // PNextV->AddMP(NId); // break; // } // } // } // } // swap pointer, no copying TIntV* Tmp = PCurV; PCurV = PNextV; PNextV = Tmp; // clear next PNextV->Reduce(0); // reduce length, does not initialize new array } return Depth-1; } #endif // USE_OPENMP } // namespace TSnap
/* ===== file: sam_layer.c ===== */
#include "sam_layer.h"
#include "utils.h"
#include "dark_cuda.h"
#include "blas.h"
#include <stdio.h>
#include <assert.h>

/* Construct a SAM (spatial attention) layer: its output is the element-wise
 * product of its input with the output of layer `index`, followed by the
 * layer's activation.  The gating layer must match in w, h and c. */
layer make_sam_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2)
{
    fprintf(stderr,"scale Layer: %d\n", index);

    layer l = { (LAYER_TYPE)0 };
    l.type = SAM;
    l.batch = batch;
    l.index = index;            /* layer whose output gates this one */
    l.w = w;  l.h = h;  l.c = c;
    l.out_w = w2;  l.out_h = h2;  l.out_c = c2;
    assert(l.out_c == l.c);
    assert(l.w == l.out_w && l.h == l.out_h);

    const int num = l.out_w * l.out_h * l.out_c;
    l.outputs = num;
    l.inputs = num;

    l.delta  = (float*)xcalloc(num * batch, sizeof(float));
    l.output = (float*)xcalloc(num * batch, sizeof(float));

    l.forward  = forward_sam_layer;
    l.backward = backward_sam_layer;
#ifdef GPU
    l.forward_gpu  = forward_sam_layer_gpu;
    l.backward_gpu = backward_sam_layer_gpu;
    l.delta_gpu  = cuda_make_array(l.delta, l.outputs*batch);
    l.output_gpu = cuda_make_array(l.output, l.outputs*batch);
#endif
    return l;
}

/* Grow/shrink the layer's buffers for a new spatial size (channels fixed). */
void resize_sam_layer(layer *l, int w, int h)
{
    l->out_w = w;
    l->out_h = h;
    l->outputs = l->out_w * l->out_h * l->out_c;
    l->inputs = l->outputs;
    l->delta  = (float*)xrealloc(l->delta,  l->outputs * l->batch * sizeof(float));
    l->output = (float*)xrealloc(l->output, l->outputs * l->batch * sizeof(float));
#ifdef GPU
    cuda_free(l->output_gpu);
    cuda_free(l->delta_gpu);
    l->output_gpu = cuda_make_array(l->output, l->outputs*l->batch);
    l->delta_gpu  = cuda_make_array(l->delta,  l->outputs*l->batch);
#endif
}

/* output = activation(input * gate), where gate is layer l.index's output. */
void forward_sam_layer(const layer l, network_state state)
{
    const int total = l.batch * l.out_c * l.out_w * l.out_h;
    float *gate = state.net.layers[l.index].output;
    int idx;
    #pragma omp parallel for
    for (idx = 0; idx < total; ++idx) {
        l.output[idx] = state.input[idx] * gate[idx];
    }
    activate_array(l.output, l.outputs*l.batch, l.activation);
}

/* Backprop the element-wise product: the incoming gradient is split between
 * this layer's input (scaled by the gate) and the gating layer (scaled by
 * the input). */
void backward_sam_layer(const layer l, network_state state)
{
    gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);

    const int total = l.batch * l.out_c * l.out_w * l.out_h;
    float *gate_out   = state.net.layers[l.index].output;
    float *gate_delta = state.net.layers[l.index].delta;
    int idx;
    #pragma omp parallel for
    for (idx = 0; idx < total; ++idx) {
        state.delta[idx] += l.delta[idx] * gate_out[idx];   /* d/d(input) */
        gate_delta[idx] = state.input[idx] * l.delta[idx];  /* d/d(gate)  */
    }
}

#ifdef GPU
/* GPU forward: same product-then-activation as the CPU path. */
void forward_sam_layer_gpu(const layer l, network_state state)
{
    const int total = l.batch * l.out_c * l.out_w * l.out_h;
    const int channel_size = 1;
    sam_gpu(state.net.layers[l.index].output_gpu, total, channel_size, state.input, l.output_gpu);
    activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
}

/* GPU backward: mirrors backward_sam_layer via the backward_sam kernel. */
void backward_sam_layer_gpu(const layer l, network_state state)
{
    gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);

    const int total = l.batch * l.out_c * l.out_w * l.out_h;
    const int channel_size = 1;
    float *gate_out   = state.net.layers[l.index].output_gpu;
    float *gate_delta = state.net.layers[l.index].delta_gpu;
    backward_sam_gpu(l.delta_gpu, total, channel_size, state.input, gate_delta, gate_out, state.delta);
}
#endif
GB_unaryop__abs_int16_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__abs_int16_uint8
// op(A') function: GB_tran__abs_int16_uint8

// C type: int16_t
// A type: uint8_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = GB_IABS (aij)

// These macros parameterize the generic templates (GB_unaryop_transpose.c)
// for this specific type pair: ABS applied to uint8_t input, int16_t output.

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting
#define GB_CASTING(z, x) \
    int16_t z = (int16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT16 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Dense elementwise apply: Cx [p] = |(int16_t) Ax [p]| for p in [0, anz).
// Returns GrB_NO_VALUE when this kernel is compile-time disabled, so the
// caller falls back to the generic (typecasting) implementation.
GrB_Info GB_unop__abs_int16_uint8
(
    int16_t *restrict Cx,               // output array, anz entries
    const uint8_t *restrict Ax,         // input array, anz entries
    int64_t anz,                        // number of entries
    int nthreads                        // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // embarrassingly parallel: each p is independent
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The body lives in the shared template GB_unaryop_transpose.c, which
// expands using the GB_* macros defined above (phase 2 of 2).
GrB_Info GB_tran__abs_int16_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
wino_conv_kernel_arm.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: haoluo@openailab.com
 */

// Winograd F(4x4, 3x3) convolution kernels for ARM (NEON).
// Output tiles are TILE x TILE; each transformed tile holds
// ELEM_SIZE = 6x6 = 36 coefficients.

#include "wino_conv_kernel_arm.h"

#include "api/c_api.h"
#include "utility/sys_port.h"

#include <math.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <arm_neon.h>

#define TILE 4
#define ELEM_SIZE ((TILE + 2) * (TILE + 2))
#define WINO_MAX(a, b) ((a) > (b) ? (a) : (b))
#define WINO_MIN(a, b) ((a) < (b) ? (a) : (b))

#ifdef __aarch64__
#define PER_OUT_CHAN 16
// Assembly kernels (implemented elsewhere) used on aarch64.
void tran_inp_4(float*, float*, float*, int, int, int);
void wino_sgemm_4x16_A72(float* output, const float* input, const float* kernel, long cin, short stride_save);
void wino_sgemm_4x4_A72(float* output, const float* input, const float* kernel, long cin, short stride_save);
void wino_sgemm_1x16(float* output, const float* input, const float* kernel, long cin);
void wino_sgemm_1x4(float* output, const float* input, const float* kernel, long cin);
void tran_out_4(float*, float*, int, float*, float*, int);
#else
#define PER_OUT_CHAN 12
// Assembly kernels (implemented elsewhere) used on 32-bit ARM.
void wino_sgemm_4x12_A17(float* output, const float* input, const float* kernel, long cin);
void wino_sgemm_4x4_A17(float* output, const float* input, const float* kernel, long cin);
void wino_sgemm_1x12_A17(float* output, const float* input, const float* kernel, long cin);

// need to be optimized by neon
// Plain-C fallback GEMM: one output row x 4 interleaved output channels.
static inline void wino_sgemm_1x4_cpu(float* output, const float* input, const float* kernel, long cin)
{
    for (int i = 0; i < 4; i++)
    {
        float sum = 0;
        for (int k = 0; k < cin; k++)
        {
            sum += input[k] * kernel[k * 4 + i];
        }
        output[i] = sum;
    }
}
#endif

// Transform one 3x3 kernel into its 6x6 Winograd domain form:
// trans_ker = G * ker * G^T, with G as documented below. The two matrix
// products are hand-expanded using shared subexpressions.
static inline void trans_kernel_f43(float* ker, float* trans_ker)
{
    /*
    float G[18]={
      1./4   ,     0.   ,    0.    ,
      -1./6  ,   -1./6  ,   -1./6  ,
      -1./6  ,    1./6  ,   -1./6  ,
      1./24  ,   1./12  ,   1./6   ,
      1./24  ,   -1./12 ,   1./6   ,
      0.     ,    0.    ,    1.
    };
    float GT[18]={
      1./4 ,  -1./6, -1./6 , 1./24, 1./24 , 0.,
      0.   ,  -1./6,  1./6 , 1./12, -1./12, 0.,
      0.   ,  -1./6, -1./6 , 1./6 , 1./6  , 1.
    };
    */
    float tmp[18] = {0};

    float neg_r0_add_r2_x_1_6[6];    // (r0+r2)*1./6
    float r0_1_4_add_r2_x_1_6[6];    // (r0*1/4 + r2)*1./6
    float r1_1_6[6];                 // r1*1/6
    float r1_1_12[6];                // r1*1/12
    float s_1_6 = 1. / 6.f;
    for (int j = 0; j < 3; j++)
    {
        neg_r0_add_r2_x_1_6[j] = -(ker[j] + ker[6 + j]) * s_1_6;
        r0_1_4_add_r2_x_1_6[j] = (ker[j] * 0.25 + ker[6 + j]) * s_1_6;
        r1_1_6[j] = ker[3 + j] * s_1_6;
        r1_1_12[j] = r1_1_6[j] * 0.5;
    }
    // tmp = G * ker (6x3 result, row-major)
    for (int j = 0; j < 3; j++)
    {
        tmp[j] = ker[j] * 0.25;
        tmp[3 + j] = -r1_1_6[j] + neg_r0_add_r2_x_1_6[j];
        tmp[6 + j] = r1_1_6[j] + neg_r0_add_r2_x_1_6[j];
        tmp[9 + j] = r1_1_12[j] + r0_1_4_add_r2_x_1_6[j];
        tmp[12 + j] = -r1_1_12[j] + r0_1_4_add_r2_x_1_6[j];
        tmp[15 + j] = ker[6 + j];
    }
    // gemm(6,3,3,G,ker,tmp); done
    int idx;
    for (int j = 0; j < 6; j++)
    {
        idx = j * 3;
        neg_r0_add_r2_x_1_6[j] = -(tmp[idx] + tmp[idx + 2]) * s_1_6;
        r0_1_4_add_r2_x_1_6[j] = (tmp[idx] * 0.25 + tmp[idx + 2]) * s_1_6;
        r1_1_6[j] = tmp[idx + 1] * s_1_6;
        r1_1_12[j] = r1_1_6[j] * 0.5;
    }
    // trans_ker = tmp * GT (6x6 result, row-major)
    for (int j = 0; j < 6; j++)
    {
        idx = j * 6;
        trans_ker[idx] = tmp[j * 3] * 0.25;
        trans_ker[idx + 1] = -r1_1_6[j] + neg_r0_add_r2_x_1_6[j];
        trans_ker[idx + 2] = r1_1_6[j] + neg_r0_add_r2_x_1_6[j];
        trans_ker[idx + 3] = r1_1_12[j] + r0_1_4_add_r2_x_1_6[j];
        trans_ker[idx + 4] = -r1_1_12[j] + r0_1_4_add_r2_x_1_6[j];
        trans_ker[idx + 5] = tmp[j * 3 + 2];
    }
    // gemm(6,6,3,tmp,GT,trans_ker); done
}

// Transform every 3x3 kernel of the filter tensor (layout [outc][inc][3][3])
// into the Winograd domain, ELEM_SIZE floats per (outc, inc) pair.
static inline void transform_kernel_f43_tile(struct tensor* filter, float* trans_ker)
{
    int outc = filter->dims[0];
    int inc = filter->dims[1];
    float* kernel = ( float* )filter->data;
    float* ker_ptr = trans_ker;

    for (int i = 0; i < outc; i++)
    {
        for (int j = 0; j < inc; j++)
        {
            trans_kernel_f43(( float* )(kernel + 9 * (j + i * inc)), ker_ptr);
            ker_ptr += ELEM_SIZE;
        }
    }
}

// src    [out_c][in_c][ELEM_SIZE]
// --> dst [out_c/PER_OUT_CHAN][ELEM_SIZE][in_c][PER_OUT_CHAN]
// Interleave the transformed kernels for the sgemm kernels: full
// PER_OUT_CHAN groups first, then groups of 4, then single channels.
static inline void interleave_kernel(float* ker0, float* ker1, int out_c, int in_c)
{
    float* ker1_ptr = ker1;
    int p, i, j;
    int nn_out = out_c / PER_OUT_CHAN;
    for (p = 0; p < nn_out; p++)
    {
        int pp = p * PER_OUT_CHAN;
        for (int s = 0; s < ELEM_SIZE; s++)
        {
            for (i = 0; i < in_c; i++)
            {
                for (j = 0; j < PER_OUT_CHAN; j++)
                {
                    *ker1_ptr = ker0[((pp + j) * in_c + i) * ELEM_SIZE + s];
                    ker1_ptr++;
                }
            }
        }
    }
    // cout 4
    for (p = (nn_out * PER_OUT_CHAN); p < (out_c & -4); p += 4)
    {
        for (int s = 0; s < ELEM_SIZE; s++)
        {
            for (i = 0; i < in_c; i++)
            {
                for (j = 0; j < 4; j++)
                {
                    *ker1_ptr = ker0[((p + j) * in_c + i) * ELEM_SIZE + s];
                    ker1_ptr++;
                }
            }
        }
    }
    // cout 1
    for (p = (out_c & -4); p < out_c; p++)
    {
        for (int s = 0; s < ELEM_SIZE; s++)
        {
            for (i = 0; i < in_c; i++)
            {
                *ker1_ptr = ker0[(p * in_c + i) * ELEM_SIZE + s];
                ker1_ptr++;
            }
        }
    }
}

// Zero-pad a CHW input image: pad0 rows on top, pad1 columns on the left;
// the remaining bottom/right padding is whatever fills padded_h x padded_w.
static inline void pad_input1(const float* input, float* inp_padded, int inc, int inh, int inw, int padded_h,
                              int padded_w, int pad0, int pad1)
{
    int padded_hw = padded_h * padded_w;

    float* pad_ptr;
    float* inp_ptr = ( float* )input;
    int resi_h = padded_h - pad0 - inh;
    int resi_w = padded_w - pad1 - inw;

    for (int c = 0; c < inc; c++)
    {
        pad_ptr = inp_padded + c * padded_hw;
        // pad h_top
        memset(pad_ptr, 0, padded_w * pad0 * sizeof(float));
        pad_ptr += pad0 * padded_w;
        // pad h_mid
        for (int h = 0; h < inh; h++)
        {
            // pad w_left
            memset(pad_ptr, 0, pad1 * sizeof(float));
            // pad w_mid
            memcpy(pad_ptr + pad1, inp_ptr, inw * sizeof(float));
            // pad w_end
            memset(pad_ptr + pad1 + inw, 0, resi_w * sizeof(float));

            inp_ptr += inw;
            pad_ptr += padded_w;
        }
        // pad h_bottom
        memset(pad_ptr, 0, padded_w * resi_h * sizeof(float));
    }
}

// Input transform for ONE 6x6 tile of channel c at block position (ih, jw):
// inp_ptr = B^T * d * B, hand-expanded (scalar reference version).
static inline void trans_inp_1tile(float* input, float* inp_ptr, int ih, int jw, int c, int in_hw, int inw)
{
    float* inp = ( float* )input + c * in_hw + ih * 4 * inw + jw * 4;
    float* inp0 = inp;
    float* inp1 = inp0 + inw;
    float* inp2 = inp1 + inw;
    float* inp3 = inp2 + inw;
    float* inp4 = inp3 + inw;
    float* inp5 = inp4 + inw;
    float tmp[36] = {0};

    float r1_add_r2[6];
    float r3_add_r4[6];
    float r1_minus_r2[6];
    float r3_minus_r4[6];
    float r4_minus_r2[6];
    float r1_minus_r3[6];

    for (int j = 0; j < 6; j++)
    {
        r1_add_r2[j] = inp1[j] + inp2[j];
        r1_minus_r2[j] = inp1[j] - inp2[j];
        r3_add_r4[j] = inp3[j] + inp4[j];
        r3_minus_r4[j] = inp3[j] - inp4[j];
        r4_minus_r2[j] = inp4[j] - inp2[j];
        r1_minus_r3[j] = inp1[j] - inp3[j];
    }
    // rows: tmp = B^T * d
    for (int j = 0; j < 6; j++)
    {
        tmp[j] = 4 * inp0[j] - 5 * inp2[j] + inp4[j];
        tmp[6 + j] = r3_add_r4[j] - 4 * r1_add_r2[j];
        tmp[12 + j] = 4 * r1_minus_r2[j] - r3_minus_r4[j];
        tmp[18 + j] = r4_minus_r2[j] - 2 * r1_minus_r3[j];
        tmp[24 + j] = r4_minus_r2[j] + 2 * r1_minus_r3[j];
        tmp[30 + j] = 4 * inp1[j] - 5 * inp3[j] + inp5[j];
    }
    float r1_4_minus_r3[6];
    float r4_minus_4_r2[6];
    float r4_minus_r2_[6];
    float r1_minus_r3_x2[6];
    for (int j = 0; j < 6; j++)
    {
        r4_minus_r2_[j] = tmp[j * 6 + 4] - tmp[j * 6 + 2];
        r1_4_minus_r3[j] = 4 * tmp[j * 6 + 1] - tmp[j * 6 + 3];
        r4_minus_4_r2[j] = tmp[j * 6 + 4] - 4 * tmp[j * 6 + 2];
        r1_minus_r3_x2[j] = 2 * (tmp[j * 6 + 1] - tmp[j * 6 + 3]);
    }
    // columns: inp_ptr = tmp * B
    for (int j = 0; j < 6; j++)
    {
        inp_ptr[j * 6] = 4 * tmp[j * 6] - 5 * tmp[j * 6 + 2] + tmp[j * 6 + 4];
        inp_ptr[1 + j * 6] = r4_minus_4_r2[j] - r1_4_minus_r3[j];
        inp_ptr[2 + j * 6] = r4_minus_4_r2[j] + r1_4_minus_r3[j];
        inp_ptr[3 + j * 6] = r4_minus_r2_[j] - r1_minus_r3_x2[j];
        inp_ptr[4 + j * 6] = r4_minus_r2_[j] + r1_minus_r3_x2[j];
        inp_ptr[5 + j * 6] = 4 * tmp[j * 6 + 1] - 5 * tmp[j * 6 + 3] + tmp[j * 6 + 5];
    }
}

// Input transform for FOUR horizontally adjacent tiles at once (NEON,
// 32-bit ARM path). Results are scattered with stride s_size so that the
// 36 coefficients of the 4 tiles land in the interleaved sgemm layout.
// The commented scalar loops below each NEON section are the reference
// the intrinsics were derived from.
static inline void trans_inp_4_cpu(float* inp, float* inp_ptr, int inw, int s_size)
{
    float* inp0 = inp;
    float* inp1 = inp0 + inw;
    float* inp2 = inp1 + inw;
    float* inp3 = inp2 + inw;
    float* inp4 = inp3 + inw;
    float* inp5 = inp4 + inw;
    float mid[36 * 4] = {0};

    float r4_minus_r2[24];
    float r1_4_minus_r3[24];
    float r4_minus_4_r2[24];
    float r1_minus_r3_x2[24];

    // Row transform for all 4 tiles (tile k reads columns k*4 .. k*4+5).
    for (int i = 0; i < 6; i++)
    {
        // 0
        mid[i * 4] = 4 * inp0[i] - 5 * inp2[i] + inp4[i];
        mid[(30 + i) * 4] = 4 * inp1[i] - 5 * inp3[i] + inp5[i];
        r1_minus_r3_x2[i * 4 + 0] = (inp1[i] - inp3[i]) * 2;
        r1_4_minus_r3[i * 4 + 0] = 4 * inp1[i] - inp3[i];
        r4_minus_4_r2[i * 4 + 0] = inp4[i] - 4 * inp2[i];
        r4_minus_r2[i * 4 + 0] = inp4[i] - inp2[i];
        // 1
        mid[i * 4 + 1] = 4 * inp0[i + 4] - 5 * inp2[i + 4] + inp4[i + 4];
        mid[(30 + i) * 4 + 1] = 4 * inp1[i + 4] - 5 * inp3[i + 4] + inp5[i + 4];
        r1_minus_r3_x2[i * 4 + 1] = (inp1[i + 4] - inp3[i + 4]) * 2;
        r1_4_minus_r3[i * 4 + 1] = 4 * inp1[i + 4] - inp3[i + 4];
        r4_minus_4_r2[i * 4 + 1] = inp4[i + 4] - 4 * inp2[i + 4];
        r4_minus_r2[i * 4 + 1] = inp4[i + 4] - inp2[i + 4];
        // 2
        mid[i * 4 + 2] = 4 * inp0[i + 8] - 5 * inp2[i + 8] + inp4[i + 8];
        mid[(30 + i) * 4 + 2] = 4 * inp1[i + 8] - 5 * inp3[i + 8] + inp5[i + 8];
        r1_minus_r3_x2[i * 4 + 2] = (inp1[i + 8] - inp3[i + 8]) * 2;
        r1_4_minus_r3[i * 4 + 2] = 4 * inp1[i + 8] - inp3[i + 8];
        r4_minus_4_r2[i * 4 + 2] = inp4[i + 8] - 4 * inp2[i + 8];
        r4_minus_r2[i * 4 + 2] = inp4[i + 8] - inp2[i + 8];
        // 3
        mid[i * 4 + 3] = 4 * inp0[i + 12] - 5 * inp2[i + 12] + inp4[i + 12];
        mid[(30 + i) * 4 + 3] = 4 * inp1[i + 12] - 5 * inp3[i + 12] + inp5[i + 12];
        r1_minus_r3_x2[i * 4 + 3] = (inp1[i + 12] - inp3[i + 12]) * 2;
        r1_4_minus_r3[i * 4 + 3] = 4 * inp1[i + 12] - inp3[i + 12];
        r4_minus_4_r2[i * 4 + 3] = inp4[i + 12] - 4 * inp2[i + 12];
        r4_minus_r2[i * 4 + 3] = inp4[i + 12] - inp2[i + 12];
    }
    //====================================================================
    // for(int i = 0; i < 6; i++)
    // {
    //     for(int k = 0; k < 4; k++)
    //     {
    //         mid[(6 + i) * 4 + k] = r4_minus_4_r2[i * 4 + k] - r1_4_minus_r3[i * 4 + k];
    //         mid[(12 + i) * 4 + k] = r4_minus_4_r2[i * 4 + k] + r1_4_minus_r3[i * 4 + k];
    //         mid[(18 + i) * 4 + k] = r4_minus_r2[i * 4 + k] - r1_minus_r3_x2[i * 4 + k];
    //         mid[(24 + i) * 4 + k] = r4_minus_r2[i * 4 + k] + r1_minus_r3_x2[i * 4 + k];
    //     }
    // }
    float32x4_t r0 = vld1q_f32(r4_minus_4_r2);
    float32x4_t r1 = vld1q_f32(r4_minus_4_r2 + 4);
    float32x4_t r2 = vld1q_f32(r4_minus_4_r2 + 8);
    float32x4_t r3 = vld1q_f32(r4_minus_4_r2 + 12);
    float32x4_t r4 = vld1q_f32(r4_minus_4_r2 + 16);
    float32x4_t r5 = vld1q_f32(r4_minus_4_r2 + 20);

    float32x4_t r0_ = vld1q_f32(r1_4_minus_r3);
    float32x4_t r1_ = vld1q_f32(r1_4_minus_r3 + 4);
    float32x4_t r2_ = vld1q_f32(r1_4_minus_r3 + 8);
    float32x4_t r3_ = vld1q_f32(r1_4_minus_r3 + 12);
    float32x4_t r4_ = vld1q_f32(r1_4_minus_r3 + 16);
    float32x4_t r5_ = vld1q_f32(r1_4_minus_r3 + 20);

    float32x4_t line0_0 = vld1q_f32(mid);
    float32x4_t line0_1 = vld1q_f32(mid + 4);
    float32x4_t line0_2 = vld1q_f32(mid + 8);
    float32x4_t line0_3 = vld1q_f32(mid + 12);
    float32x4_t line0_4 = vld1q_f32(mid + 16);
    float32x4_t line0_5 = vld1q_f32(mid + 20);

    float32x4_t line1_0 = vsubq_f32(r0, r0_);    // mid[(6 + i) * 4 + k]   [1][0]
    float32x4_t line1_1 = vsubq_f32(r1, r1_);    // mid[(6 + i) * 4 + k]   [1][1]
    float32x4_t line1_2 = vsubq_f32(r2, r2_);    // mid[(6 + i) * 4 + k]   [1][2]
    float32x4_t line1_3 = vsubq_f32(r3, r3_);    // mid[(6 + i) * 4 + k]   [1][3]
    float32x4_t line1_4 = vsubq_f32(r4, r4_);    // mid[(6 + i) * 4 + k]   [1][4]
    float32x4_t line1_5 = vsubq_f32(r5, r5_);    // mid[(6 + i) * 4 + k]   [1][5]

    float32x4_t line2_0 = vaddq_f32(r0, r0_);    // mid[(12 + i) * 4 + k]  [2][0]
    float32x4_t line2_1 = vaddq_f32(r1, r1_);    // mid[(12 + i) * 4 + k]  [2][1]
    float32x4_t line2_2 = vaddq_f32(r2, r2_);    // mid[(12 + i) * 4 + k]  [2][2]
    float32x4_t line2_3 = vaddq_f32(r3, r3_);    // mid[(12 + i) * 4 + k]  [2][3]
    float32x4_t line2_4 = vaddq_f32(r4, r4_);    // mid[(12 + i) * 4 + k]  [2][4]
    float32x4_t line2_5 = vaddq_f32(r5, r5_);    // mid[(12 + i) * 4 + k]  [2][5]

    r0 = vld1q_f32(r4_minus_r2);
    r1 = vld1q_f32(r4_minus_r2 + 4);
    r2 = vld1q_f32(r4_minus_r2 + 8);
    r3 = vld1q_f32(r4_minus_r2 + 12);
    r4 = vld1q_f32(r4_minus_r2 + 16);
    r5 = vld1q_f32(r4_minus_r2 + 20);

    r0_ = vld1q_f32(r1_minus_r3_x2);
    r1_ = vld1q_f32(r1_minus_r3_x2 + 4);
    r2_ = vld1q_f32(r1_minus_r3_x2 + 8);
    r3_ = vld1q_f32(r1_minus_r3_x2 + 12);
    r4_ = vld1q_f32(r1_minus_r3_x2 + 16);
    r5_ = vld1q_f32(r1_minus_r3_x2 + 20);

    float32x4_t line5_0 = vld1q_f32(mid + 120);
    float32x4_t line5_1 = vld1q_f32(mid + 124);
    float32x4_t line5_2 = vld1q_f32(mid + 128);
    float32x4_t line5_3 = vld1q_f32(mid + 132);
    float32x4_t line5_4 = vld1q_f32(mid + 136);
    float32x4_t line5_5 = vld1q_f32(mid + 140);

    float32x4_t line3_0 = vsubq_f32(r0, r0_);    // mid[(18 + i) * 4 + k]  [3][0]
    float32x4_t line3_1 = vsubq_f32(r1, r1_);    // mid[(18 + i) * 4 + k]  [3][1]
    float32x4_t line3_2 = vsubq_f32(r2, r2_);    // mid[(18 + i) * 4 + k]  [3][2]
    float32x4_t line3_3 = vsubq_f32(r3, r3_);    // mid[(18 + i) * 4 + k]  [3][3]
    float32x4_t line3_4 = vsubq_f32(r4, r4_);    // mid[(18 + i) * 4 + k]  [3][4]
    float32x4_t line3_5 = vsubq_f32(r5, r5_);    // mid[(18 + i) * 4 + k]  [3][5]

    float32x4_t line4_0 = vaddq_f32(r0, r0_);    // mid[(24 + i) * 4 + k]  [4][0]
    float32x4_t line4_1 = vaddq_f32(r1, r1_);    // mid[(24 + i) * 4 + k]  [4][1]
    float32x4_t line4_2 = vaddq_f32(r2, r2_);    // mid[(24 + i) * 4 + k]  [4][2]
    float32x4_t line4_3 = vaddq_f32(r3, r3_);    // mid[(24 + i) * 4 + k]  [4][3]
    float32x4_t line4_4 = vaddq_f32(r4, r4_);    // mid[(24 + i) * 4 + k]  [4][4]
    float32x4_t line4_5 = vaddq_f32(r5, r5_);    // mid[(24 + i) * 4 + k]  [4][5]

    // r4_minus_r2[i * 4 + k]  i=0  = mid[0][4]
    r0 = vsubq_f32(line0_4, line0_2);
    r1 = vsubq_f32(line1_4, line1_2);
    r2 = vsubq_f32(line2_4, line2_2);
    r3 = vsubq_f32(line3_4, line3_2);
    r4 = vsubq_f32(line4_4, line4_2);
    r5 = vsubq_f32(line5_4, line5_2);

    r0_ = vsubq_f32(line0_1, line0_3);
    r1_ = vsubq_f32(line1_1, line1_3);
    r2_ = vsubq_f32(line2_1, line2_3);
    r3_ = vsubq_f32(line3_1, line3_3);
    r4_ = vsubq_f32(line4_1, line4_3);
    r5_ = vsubq_f32(line5_1, line5_3);
    float32x4_t const2 = vdupq_n_f32(2.f);
    r0_ = vmulq_f32(r0_, const2);
    r1_ = vmulq_f32(r1_, const2);
    r2_ = vmulq_f32(r2_, const2);
    r3_ = vmulq_f32(r3_, const2);
    r4_ = vmulq_f32(r4_, const2);
    r5_ = vmulq_f32(r5_, const2);

    vst1q_f32(inp_ptr + s_size * 3, vsubq_f32(r0, r0_));     // inp_ptr[ s_size * (3 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 9, vsubq_f32(r1, r1_));     // inp_ptr[ s_size * (3 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 15, vsubq_f32(r2, r2_));    // inp_ptr[ s_size * (3 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 21, vsubq_f32(r3, r3_));    // inp_ptr[ s_size * (3 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 27, vsubq_f32(r4, r4_));    // inp_ptr[ s_size * (3 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 33, vsubq_f32(r5, r5_));    // inp_ptr[ s_size * (3 + i * 6)]

    vst1q_f32(inp_ptr + s_size * 4, vaddq_f32(r0, r0_));     // inp_ptr[ s_size * (4 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 10, vaddq_f32(r1, r1_));    // inp_ptr[ s_size * (4 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 16, vaddq_f32(r2, r2_));    // inp_ptr[ s_size * (4 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 22, vaddq_f32(r3, r3_));    // inp_ptr[ s_size * (4 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 28, vaddq_f32(r4, r4_));    // inp_ptr[ s_size * (4 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 34, vaddq_f32(r5, r5_));    // inp_ptr[ s_size * (4 + i * 6)]

    float32x4_t const4 = vdupq_n_f32(4.f);
    float32x4_t const5 = vdupq_n_f32(-5.f);

    r0_ = vmulq_f32(line0_1, const4);    // line 1*4 ========
    r1_ = vmulq_f32(line1_1, const4);
    r2_ = vmulq_f32(line2_1, const4);
    r3_ = vmulq_f32(line3_1, const4);
    r4_ = vmulq_f32(line4_1, const4);
    r5_ = vmulq_f32(line5_1, const4);

    float32x4_t rr0_ = vsubq_f32(r0_, line0_3);    // line1*4-line3
    float32x4_t rr1_ = vsubq_f32(r1_, line1_3);
    float32x4_t rr2_ = vsubq_f32(r2_, line2_3);
    float32x4_t rr3_ = vsubq_f32(r3_, line3_3);
    float32x4_t rr4_ = vsubq_f32(r4_, line4_3);
    float32x4_t rr5_ = vsubq_f32(r5_, line5_3);

    r0 = vmulq_f32(line0_2, const4);
    r1 = vmulq_f32(line1_2, const4);
    r2 = vmulq_f32(line2_2, const4);
    r3 = vmulq_f32(line3_2, const4);
    r4 = vmulq_f32(line4_2, const4);
    r5 = vmulq_f32(line5_2, const4);

    r0 = vsubq_f32(line0_4, r0);    // line4 -4*line2
    r1 = vsubq_f32(line1_4, r1);
    r2 = vsubq_f32(line2_4, r2);
    r3 = vsubq_f32(line3_4, r3);
    r4 = vsubq_f32(line4_4, r4);
    r5 = vsubq_f32(line5_4, r5);

    vst1q_f32(inp_ptr + s_size * 1, vsubq_f32(r0, rr0_));    // inp_ptr[ s_size * (1 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 7, vsubq_f32(r1, rr1_));    // inp_ptr[ s_size * (1 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 13, vsubq_f32(r2, rr2_));   // inp_ptr[ s_size * (1 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 19, vsubq_f32(r3, rr3_));   // inp_ptr[ s_size * (1 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 25, vsubq_f32(r4, rr4_));   // inp_ptr[ s_size * (1 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 31, vsubq_f32(r5, rr5_));   // inp_ptr[ s_size * (1 + i * 6)]

    vst1q_f32(inp_ptr + s_size * 2, vaddq_f32(r0, rr0_));    // inp_ptr[ s_size * (2 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 8, vaddq_f32(r1, rr1_));    // inp_ptr[ s_size * (2 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 14, vaddq_f32(r2, rr2_));   // inp_ptr[ s_size * (2 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 20, vaddq_f32(r3, rr3_));   // inp_ptr[ s_size * (2 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 26, vaddq_f32(r4, rr4_));   // inp_ptr[ s_size * (2 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 32, vaddq_f32(r5, rr5_));   // inp_ptr[ s_size * (2 + i * 6)]

    r0_ = vaddq_f32(line0_5, r0_);    // 5 + 1*4
    r1_ = vaddq_f32(line1_5, r1_);
    r2_ = vaddq_f32(line2_5, r2_);
    r3_ = vaddq_f32(line3_5, r3_);
    r4_ = vaddq_f32(line4_5, r4_);
    r5_ = vaddq_f32(line5_5, r5_);

    r0 = vmulq_f32(line0_3, const5);
    r1 = vmulq_f32(line1_3, const5);
    r2 = vmulq_f32(line2_3, const5);
    r3 = vmulq_f32(line3_3, const5);
    r4 = vmulq_f32(line4_3, const5);
    r5 = vmulq_f32(line5_3, const5);

    vst1q_f32(inp_ptr + s_size * 5, vaddq_f32(r0, r0_));     // inp_ptr[ s_size * (5 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 11, vaddq_f32(r1, r1_));    // inp_ptr[ s_size * (5 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 17, vaddq_f32(r2, r2_));    // inp_ptr[ s_size * (5 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 23, vaddq_f32(r3, r3_));    // inp_ptr[ s_size * (5 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 29, vaddq_f32(r4, r4_));    // inp_ptr[ s_size * (5 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 35, vaddq_f32(r5, r5_));    // inp_ptr[ s_size * (5 + i * 6)]

    r0 = vmulq_f32(line0_0, const4);
    r1 = vmulq_f32(line1_0, const4);
    r2 = vmulq_f32(line2_0, const4);
    r3 = vmulq_f32(line3_0, const4);
    r4 = vmulq_f32(line4_0, const4);
    r5 = vmulq_f32(line5_0, const4);

    r0_ = vmulq_f32(line0_2, const5);
    r1_ = vmulq_f32(line1_2, const5);
    r2_ = vmulq_f32(line2_2, const5);
    r3_ = vmulq_f32(line3_2, const5);
    r4_ = vmulq_f32(line4_2, const5);
    r5_ = vmulq_f32(line5_2, const5);

    r0 = vaddq_f32(r0, line0_4);
    r1 = vaddq_f32(r1, line1_4);
    r2 = vaddq_f32(r2, line2_4);
    r3 = vaddq_f32(r3, line3_4);
    r4 = vaddq_f32(r4, line4_4);
    r5 = vaddq_f32(r5, line5_4);

    vst1q_f32(inp_ptr + s_size * 0, vaddq_f32(r0, r0_));     // inp_ptr[ s_size * (1 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 6, vaddq_f32(r1, r1_));     // inp_ptr[ s_size * (1 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 12, vaddq_f32(r2, r2_));    // inp_ptr[ s_size * (1 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 18, vaddq_f32(r3, r3_));    // inp_ptr[ s_size * (1 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 24, vaddq_f32(r4, r4_));    // inp_ptr[ s_size * (1 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 30, vaddq_f32(r5, r5_));    // inp_ptr[ s_size * (1 + i * 6)]

    // for(int i = 0; i < 6; i++)
    // {
    //     for(int k = 0; k < 4; k++)
    //     {
    //         r4_minus_r2[i * 4 + k] = mid[(i * 6 + 4) * 4 + k] - mid[(i * 6 + 2) * 4 + k];
    //         r1_4_minus_r3[i * 4 + k] = 4 * mid[(i * 6 + 1) * 4 + k] - mid[(i * 6 + 3) * 4 + k];
    //         r4_minus_4_r2[i * 4 + k] = mid[(i * 6 + 4) * 4 + k] - 4 * mid[(i * 6 + 2) * 4 + k];
    //         r1_minus_r3_x2[i * 4 + k] = 2 * (mid[(i * 6 + 1) * 4 + k] - mid[(i * 6 + 3) * 4 + k]);
    //     }
    // }
    // for(int i = 1; i < 2; i++)
    // {
    //     for(int k = 0; k < 4; k++)
    //     {
    //         inp_ptr[k + s_size * (i * 6)] =
    //             4 * mid[(i * 6) * 4 + k] - 5 * mid[(i * 6 + 2) * 4 + k] + mid[(i * 6 + 4) * 4 + k];
    //         //  inp_ptr[k + s_size * (1 + i * 6)] = r4_minus_4_r2[i * 4 + k] - r1_4_minus_r3[i * 4 + k];
    //         //  inp_ptr[k + s_size * (2 + i * 6)] = r4_minus_4_r2[i * 4 + k] + r1_4_minus_r3[i * 4 + k];
    //         //  inp_ptr[k + s_size * (3 + i * 6)] = r4_minus_r2[i * 4 + k] - r1_minus_r3_x2[i * 4 + k];
    //         //  inp_ptr[k + s_size * (4 + i * 6)] = r4_minus_r2[i * 4 + k] + r1_minus_r3_x2[i * 4 + k];
    //         //  inp_ptr[k + s_size * (5 + i * 6)] =
    //         //      4 * mid[(i * 6 + 1) * 4 + k] - 5 * mid[(i * 6 + 3) * 4 + k] + mid[(i * 6 + 5) * 4 + k];
    //     }
    // }
}

// trans_input  [block_hw/4][ELEM_SIZE][inc][4]
// Transform the input in groups of 4 tiles. The fast per-row path is used
// when all 4 tiles share a row; otherwise each tile is transformed alone
// and then interleaved.
static inline void tran_input_4block(const float* input, float* trans_inp, int inc, int block_h, int block_w, int inh,
                                     int inw, int num_thread)
{
    int in_hw = inh * inw;
    int block_hw = block_h * block_w;
    int nn_block = block_hw >> 2;
    int idxh[4];
    int idxw[4];

#pragma omp parallel for num_threads(num_thread) shared(block_hw,nn_block,in_hw) private(idxh,idxw)
    for (int ib = 0; ib < nn_block; ib++)
    {
        float* inp_ptr_4tile = trans_inp + ib * 4 * ELEM_SIZE * inc;
        idxh[0] = (ib * 4) / block_w;
        idxh[1] = (ib * 4 + 1) / block_w;
        idxh[2] = (ib * 4 + 2) / block_w;
        idxh[3] = (ib * 4 + 3) / block_w;
        idxw[0] = (ib * 4) % block_w;
        idxw[1] = (ib * 4 + 1) % block_w;
        idxw[2] = (ib * 4 + 2) % block_w;
        idxw[3] = (ib * 4 + 3) % block_w;

        if (idxh[0] == idxh[3])
        {
            // all 4 tiles are horizontally contiguous in the same block row
            float* temp_inp_ptr = ( float* )(input + idxh[0] * 4 * inw + idxw[0] * 4);

            for (int c = 0; c < inc; c++)
            {
#ifdef __aarch64__
                float ker00[4] = {1, 2, 4, 5};
                tran_inp_4(temp_inp_ptr, inp_ptr_4tile + 4 * c, ker00, inw, inc * 16, in_hw);
                temp_inp_ptr += in_hw;
#else
                trans_inp_4_cpu(temp_inp_ptr, inp_ptr_4tile + c * 4, inw, inc * 4);
                temp_inp_ptr += in_hw;
#endif
            }
        }
        else
        {
            // group wraps to the next block row: do tiles one by one
            float buffer0[inc * ELEM_SIZE * 4];
            float* buffer = buffer0;

            for (int c = 0; c < inc; c++)
            {
                trans_inp_1tile(( float* )input, buffer, idxh[0], idxw[0], c, in_hw, inw);
                buffer += ELEM_SIZE;
                trans_inp_1tile(( float* )input, buffer, idxh[1], idxw[1], c, in_hw, inw);
                buffer += ELEM_SIZE;
                trans_inp_1tile(( float* )input, buffer, idxh[2], idxw[2], c, in_hw, inw);
                buffer += ELEM_SIZE;
                trans_inp_1tile(( float* )input, buffer, idxh[3], idxw[3], c, in_hw, inw);
                buffer += ELEM_SIZE;
            }
            // interleave
            float* tmp_inp = inp_ptr_4tile;
            for (int s = 0; s < ELEM_SIZE; s++)
            {
                for (int i = 0; i < inc; i++)
                {
                    for (int j = 0; j < 4; j++)
                    {
                        *tmp_inp = buffer0[i * ELEM_SIZE * 4 + j * ELEM_SIZE + s];
                        tmp_inp++;
                    }
                }
            }
            // end interleave
        }
    }
}

// Transform the leftover (< 4) tiles one at a time and interleave them as
// [ELEM_SIZE][inc] after the 4-tile groups.
static inline void tran_input_resi_block(const float* input, float* trans_inp, int inc, int nn_block, int resi_block,
                                         int block_hw, int block_w, int in_hw, int inw)
{
    float* inp_ptr = trans_inp + nn_block * 4 * ELEM_SIZE * inc;
    for (int ib = resi_block; ib < block_hw; ib++)
    {
        float buffer0[ELEM_SIZE * inc];
        float* buffer = buffer0;
        for (int c = 0; c < inc; c++)
        {
            int ih = ib / block_w;
            int jw = ib % block_w;
            trans_inp_1tile(( float* )input, buffer, ih, jw, c, in_hw, inw);
            buffer += ELEM_SIZE;
        }
        // interleave
        for (int s = 0; s < ELEM_SIZE; s++)
        {
            for (int i = 0; i < inc; i++)
            {
                *inp_ptr = buffer0[i * ELEM_SIZE + s];
                inp_ptr++;
            }
        }
        // end interleave
    }
}

// Apply the activation encoded as an int: any value >= 0 clamps at 0
// (ReLU); the value 6 additionally clamps at 6 (ReLU6).
static inline float do_activation(float value, int activation)
{
    if (activation >= 0)
        value = WINO_MAX(value, 0);
    if (activation == 6)
        value = WINO_MIN(value, 6);

    return value;
}

// Output transform for one full interior tile: out = A^T * mid * A, then
// bias and activation, written directly to the 4x4 destination window.
static inline void trans_output_f43(const float* mid, float* out, int outw, const float* bias_ptr, int activation)
{
    /*
    float AT[24]={
      1.,  1.,  1.,  1.,  1.,  0.,
      0.,  1., -1.,  2., -2.,  0.,
      0.,  1.,  1.,  4.,  4.,  0.,
      0.,  1., -1.,  8., -8.,  1.
    };
    float A[24]={
      1.,  0., 0.,  0.,
      1.,  1., 1.,  1.,
      1., -1., 1., -1.,
      1.,  2., 4.,  8.,
      1., -2., 4., -8.,
      0.,  0., 0.,  1.
    };
    */
    float tmp[24] = {0};

    float r1_add_r2[6];
    float r1_minus_r2[6];
    float r3_add_r4[6];
    float r3_minus_r4_x2[6];

    for (int j = 0; j < 6; j++)
    {
        r1_add_r2[j] = mid[6 * 1 + j] + mid[6 * 2 + j];
        r1_minus_r2[j] = mid[6 * 1 + j] - mid[6 * 2 + j];
        r3_add_r4[j] = mid[6 * 3 + j] + mid[6 * 4 + j];
        r3_minus_r4_x2[j] = (mid[6 * 3 + j] - mid[6 * 4 + j]) * 2;
    }
    // tmp = AT * mid (4x6)
    for (int j = 0; j < 6; j++)
    {
        tmp[j] = mid[j] + r1_add_r2[j] + r3_add_r4[j];
        tmp[6 + j] = r1_minus_r2[j] + r3_minus_r4_x2[j];
        tmp[12 + j] = r1_add_r2[j] + 4 * r3_add_r4[j];
        tmp[18 + j] = r1_minus_r2[j] + 4 * r3_minus_r4_x2[j] + mid[6 * 5 + j];
    }

    float* out0 = out;
    float* out1 = out0 + outw;
    float* out2 = out1 + outw;
    float* out3 = out2 + outw;

    float _r1_add_r2[4];
    float _r1_minus_r2[4];
    float _r3_add_r4[4];
    float _r3_minus_r4_x2[4];
    int idx;
    for (int j = 0; j < 4; j++)
    {
        idx = 6 * j;
        _r1_add_r2[j] = tmp[idx + 1] + tmp[idx + 2];
        _r1_minus_r2[j] = tmp[idx + 1] - tmp[idx + 2];
        _r3_add_r4[j] = tmp[idx + 3] + tmp[idx + 4];
        _r3_minus_r4_x2[j] = (tmp[idx + 3] - tmp[idx + 4]) * 2;
    }
    // out = tmp * A (+ bias), activation applied per element
    if (bias_ptr)
    {
        float bias = bias_ptr[0];
        out0[0] = do_activation(tmp[0 * 6] + _r1_add_r2[0] + _r3_add_r4[0] + bias, activation);
        out1[0] = do_activation(tmp[1 * 6] + _r1_add_r2[1] + _r3_add_r4[1] + bias, activation);
        out2[0] = do_activation(tmp[2 * 6] + _r1_add_r2[2] + _r3_add_r4[2] + bias, activation);
        out3[0] = do_activation(tmp[3 * 6] + _r1_add_r2[3] + _r3_add_r4[3] + bias, activation);

        out0[1] = do_activation(_r1_minus_r2[0] + _r3_minus_r4_x2[0] + bias, activation);
        out1[1] = do_activation(_r1_minus_r2[1] + _r3_minus_r4_x2[1] + bias, activation);
        out2[1] = do_activation(_r1_minus_r2[2] + _r3_minus_r4_x2[2] + bias, activation);
        out3[1] = do_activation(_r1_minus_r2[3] + _r3_minus_r4_x2[3] + bias, activation);

        out0[2] = do_activation(_r1_add_r2[0] + 4 * _r3_add_r4[0] + bias, activation);
        out1[2] = do_activation(_r1_add_r2[1] + 4 * _r3_add_r4[1] + bias, activation);
        out2[2] = do_activation(_r1_add_r2[2] + 4 * _r3_add_r4[2] + bias, activation);
        out3[2] = do_activation(_r1_add_r2[3] + 4 * _r3_add_r4[3] + bias, activation);

        out0[3] = do_activation(_r1_minus_r2[0] + 4 * _r3_minus_r4_x2[0] + tmp[0 * 6 + 5] + bias, activation);
        out1[3] = do_activation(_r1_minus_r2[1] + 4 * _r3_minus_r4_x2[1] + tmp[1 * 6 + 5] + bias, activation);
        out2[3] = do_activation(_r1_minus_r2[2] + 4 * _r3_minus_r4_x2[2] + tmp[2 * 6 + 5] + bias, activation);
        out3[3] = do_activation(_r1_minus_r2[3] + 4 * _r3_minus_r4_x2[3] + tmp[3 * 6 + 5] + bias, activation);
    }
    else
    {
        out0[0] = do_activation(tmp[0 * 6] + _r1_add_r2[0] + _r3_add_r4[0], activation);
        out1[0] = do_activation(tmp[1 * 6] + _r1_add_r2[1] + _r3_add_r4[1], activation);
        out2[0] = do_activation(tmp[2 * 6] + _r1_add_r2[2] + _r3_add_r4[2], activation);
        out3[0] = do_activation(tmp[3 * 6] + _r1_add_r2[3] + _r3_add_r4[3], activation);

        out0[1] = do_activation(_r1_minus_r2[0] + _r3_minus_r4_x2[0], activation);
        out1[1] = do_activation(_r1_minus_r2[1] + _r3_minus_r4_x2[1], activation);
        out2[1] = do_activation(_r1_minus_r2[2] + _r3_minus_r4_x2[2], activation);
        out3[1] = do_activation(_r1_minus_r2[3] + _r3_minus_r4_x2[3], activation);

        out0[2] = do_activation(_r1_add_r2[0] + 4 * _r3_add_r4[0], activation);
        out1[2] = do_activation(_r1_add_r2[1] + 4 * _r3_add_r4[1], activation);
        out2[2] = do_activation(_r1_add_r2[2] + 4 * _r3_add_r4[2], activation);
        out3[2] = do_activation(_r1_add_r2[3] + 4 * _r3_add_r4[3], activation);

        out0[3] = do_activation(_r1_minus_r2[0] + 4 * _r3_minus_r4_x2[0] + tmp[0 * 6 + 5], activation);
        out1[3] = do_activation(_r1_minus_r2[1] + 4 * _r3_minus_r4_x2[1] + tmp[1 * 6 + 5], activation);
        out2[3] = do_activation(_r1_minus_r2[2] + 4 * _r3_minus_r4_x2[2], activation);
        out3[3] = do_activation(_r1_minus_r2[3] + 4 * _r3_minus_r4_x2[3] + tmp[3 * 6 + 5], activation);
    }
}

// Same output transform as trans_output_f43 but written to a dense 4x4
// buffer, with optional bias and NO activation — used for edge tiles that
// must be clipped before being copied into the output image.
static inline void trans_output_f43_ordinary(const float* mid, float* out, const float* bias_ptr)
{
    /*
    float AT[24]={
      1.,  1.,  1.,  1.,  1.,  0.,
      0.,  1., -1.,  2., -2.,  0.,
      0.,  1.,  1.,  4.,  4.,  0.,
      0.,  1., -1.,  8., -8.,  1.
    };
    float A[24]={
      1.,  0., 0.,  0.,
      1.,  1., 1.,  1.,
      1., -1., 1., -1.,
      1.,  2., 4.,  8.,
      1., -2., 4., -8.,
      0.,  0., 0.,  1.
    };
    */
    float tmp[24] = {0};

    float r1_add_r2[6];
    float r1_minus_r2[6];
    float r3_add_r4[6];
    float r3_minus_r4_x2[6];

    for (int j = 0; j < 6; j++)
    {
        r1_add_r2[j] = mid[6 * 1 + j] + mid[6 * 2 + j];
        r1_minus_r2[j] = mid[6 * 1 + j] - mid[6 * 2 + j];
        r3_add_r4[j] = mid[6 * 3 + j] + mid[6 * 4 + j];
        r3_minus_r4_x2[j] = (mid[6 * 3 + j] - mid[6 * 4 + j]) * 2;
    }
    for (int j = 0; j < 6; j++)
    {
        tmp[j] = mid[j] + r1_add_r2[j] + r3_add_r4[j];
        tmp[6 + j] = r1_minus_r2[j] + r3_minus_r4_x2[j];
        tmp[12 + j] = r1_add_r2[j] + 4 * r3_add_r4[j];
        tmp[18 + j] = r1_minus_r2[j] + 4 * r3_minus_r4_x2[j] + mid[6 * 5 + j];
    }

    float _r1_add_r2[4];
    float _r1_minus_r2[4];
    float _r3_add_r4[4];
    float _r3_minus_r4_x2[4];
    int idx;
    for (int j = 0; j < 4; j++)
    {
        idx = 6 * j;
        _r1_add_r2[j] = tmp[idx + 1] + tmp[idx + 2];
        _r1_minus_r2[j] = tmp[idx + 1] - tmp[idx + 2];
        _r3_add_r4[j] = tmp[idx + 3] + tmp[idx + 4];
        _r3_minus_r4_x2[j] = (tmp[idx + 3] - tmp[idx + 4]) * 2;
    }
    if (bias_ptr)
    {
        float bias = bias_ptr[0];
        for (int j = 0; j < 4; j++)
        {
            idx = j * 4;
            out[idx] = bias + tmp[j * 6] + _r1_add_r2[j] + _r3_add_r4[j];
            out[idx + 1] = bias + _r1_minus_r2[j] + _r3_minus_r4_x2[j];
            out[idx + 2] = bias + _r1_add_r2[j] + 4 * _r3_add_r4[j];
            out[idx + 3] = bias + _r1_minus_r2[j] + 4 * _r3_minus_r4_x2[j] + tmp[j * 6 + 5];
        }
    }
    else
    {
        for (int j = 0; j < 4; j++)
        {
            idx = j * 4;
            out[idx] = tmp[j * 6] + _r1_add_r2[j] + _r3_add_r4[j];
            out[idx + 1] = _r1_minus_r2[j] + _r3_minus_r4_x2[j];
            out[idx + 2] = _r1_add_r2[j] + 4 * _r3_add_r4[j];
            out[idx + 3] = _r1_minus_r2[j] + 4 * _r3_minus_r4_x2[j] + tmp[j * 6 + 5];
        }
    }
}

// Inverse-transform ONE tile for KER_COUT_UNIT_ consecutive output
// channels. Interior tiles go straight to the output; edge tiles are
// expanded into tmp_buffer and clipped to ret_h x ret_w before copying.
static inline void transform_output_f43_1tile(const float* buffer_ptr, float* out, int p_idx, int idx_blockhw,
                                              int block_h, int block_w, int out_hw, int outw, int resi_h, int resi_w,
                                              int KER_COUT_UNIT_, const float* bias, int activation)
{
    float tmp_buffer[TILE * TILE];
    const float* bias_ptr = NULL;
    for (int p = 0; p < KER_COUT_UNIT_; p++)
    {
        int cout_idx = p_idx + p;
        if (bias)
        {
            bias_ptr = (bias + cout_idx);
        }
        float* out_ptr = out + cout_idx * out_hw;
        int i_h = idx_blockhw / block_w;
        int j_w = idx_blockhw % block_w;
        if ((resi_h == 0 && resi_w == 0) || (resi_h == 0 && (j_w < block_w - 1)) ||
            (resi_w == 0 && (i_h < block_h - 1)) || ((j_w < block_w - 1) && (i_h < block_h - 1)))
        {
            // full interior tile: write the 4x4 result directly
            trans_output_f43(buffer_ptr, out_ptr + (i_h * TILE * outw + j_w * TILE), outw, bias_ptr, activation);
        }
        else
        {
            // partial edge tile: compute into tmp_buffer, then clip-copy
            int ret_h = TILE - resi_h;
            if (i_h < block_h - 1)
                ret_h = TILE;
            int ret_w = TILE - resi_w;
            if (j_w < block_w - 1)
                ret_w = TILE;
            // tmp_buffer
            trans_output_f43_ordinary(buffer_ptr, tmp_buffer, bias_ptr);
            float* out_pointer = out_ptr + (i_h * TILE * outw + j_w * TILE);
            for (int hh = 0; hh < ret_h; hh++)
            {
                for (int ww = 0; ww < ret_w; ww++)
                {
                    out_pointer[hh * outw + ww] = do_activation(tmp_buffer[hh * TILE + ww], activation);
                }
            }
        }
        buffer_ptr += ELEM_SIZE;
    }
}

// NOTE(review): definition truncated at the chunk boundary — the remainder
// of this function lies outside the visible source.
static inline void transform_output_f43_4tile(float* buffer_ptr, float* out, int p_idx, int block_idx, int block_h,
                                              int block_w, int outh, int outw, int resi_h, int resi_w,
                                              int KER_COUT_UNIT_, const float* bias, int activation)
{
    int out_hw = outh * outw;
    float tmp_buffer[TILE * TILE];
    int
idx_h[4]; int idx_w[4]; idx_h[0] = (block_idx) / block_w; idx_h[1] = (block_idx + 1) / block_w; idx_h[2] = (block_idx + 2) / block_w; idx_h[3] = (block_idx + 3) / block_w; idx_w[0] = (block_idx) % block_w; idx_w[1] = (block_idx + 1) % block_w; idx_w[2] = (block_idx + 2) % block_w; idx_w[3] = (block_idx + 3) % block_w; float* bias_ptr = NULL; for (int p = 0; p < KER_COUT_UNIT_; p++) { int cout_idx = p_idx + p; float* out_ptr = out + cout_idx * out_hw; if (bias) { bias_ptr = ( float* )bias + cout_idx; } for (int ii = 0; ii < 4; ii++) { int i_h = idx_h[ii]; int j_w = idx_w[ii]; if ((resi_h == 0 && resi_w == 0) || (resi_h == 0 && (j_w < block_w - 1)) || (resi_w == 0 && (i_h < block_h - 1)) || ((j_w < block_w - 1) && (i_h < block_h - 1))) { trans_output_f43(buffer_ptr, out_ptr + (i_h * TILE * outw + j_w * TILE), outw, bias_ptr, activation); } // direct use_out_ptr else { int ret_h = TILE - resi_h; if (i_h < block_h - 1) ret_h = TILE; int ret_w = TILE - resi_w; if (j_w < block_w - 1) ret_w = TILE; // tmp_buffer trans_output_f43_ordinary(buffer_ptr, tmp_buffer, bias_ptr); float* out_pointer = out_ptr + (i_h * TILE * outw + j_w * TILE); for (int hh = 0; hh < ret_h; hh++) { for (int ww = 0; ww < ret_w; ww++) { out_pointer[hh * outw + ww] = do_activation(tmp_buffer[hh * 4 + ww], activation); } } } // end else, tmp_buff buffer_ptr += ELEM_SIZE; } } } // trans_input [block_hw/4][ELEM_SIZE][inc][4] // kernel [out_c/PER_OUT_CHAN][ELEM_SIZE][in_c][PER_OUT_CHAN] static void wino_sgemm_set(const float* ker, const float* inp, float* output, const float* bias, int cin, int cout_end, int block_h, int block_w, int out_h, int out_w, int resi_h, int resi_w, int activation, int num_thread, int cpu_affinity) { int flag_outw = 1; if (out_w < 16) flag_outw = 0; #pragma omp parallel for num_threads(num_thread) for (int p = 0; p < (cout_end & -PER_OUT_CHAN); p += PER_OUT_CHAN) { int out_hw = out_w * out_h; int block_hw = block_h * block_w; const float* ker_ptr = ker + p * ELEM_SIZE * cin; int 
i = 0; for (; i < (block_hw & -4); i += 4) { const float* inp_ptr = inp + i * ELEM_SIZE * cin; float out_buffer[PER_OUT_CHAN * 4 * ELEM_SIZE]; #ifdef __aarch64__ int idx_h[4]; int idx_w[4]; idx_h[0] = (i) / block_w; idx_h[1] = (i + 1) / block_w; idx_h[2] = (i + 2) / block_w; idx_h[3] = (i + 3) / block_w; idx_w[0] = (i) % block_w; idx_w[1] = (i + 1) % block_w; idx_w[2] = (i + 2) % block_w; idx_w[3] = (i + 3) % block_w; int wino_out_4_tiles = 0; int mulitplier = PER_OUT_CHAN; if (flag_outw) { if ((idx_h[0] == idx_h[3]) && (idx_h[0] < (block_h - 1)) && (idx_w[3] < (block_w - 1))) { wino_out_4_tiles = 1; mulitplier = 1; } } for (int s = 0; s < ELEM_SIZE; s++) { wino_sgemm_4x16_A72(out_buffer + s * 4 * mulitplier, inp_ptr + s * 4 * cin, ker_ptr + s * PER_OUT_CHAN * cin, cin, wino_out_4_tiles); } if (wino_out_4_tiles == 1) { float* bias_ptr = NULL; for (int pss = 0; pss < PER_OUT_CHAN; pss++) { int cout_idx = p + pss; float* out_ptr = output + cout_idx * out_hw + idx_h[0] * TILE * out_w + idx_w[0] * TILE; if (bias) { bias_ptr = ( float* )(bias + cout_idx); } float ker00[4] = {2, 4, 8, 0}; tran_out_4(out_buffer + pss * ELEM_SIZE * 4, out_ptr, out_w * sizeof(float), ker00, bias_ptr, activation); } } else { float buffer[PER_OUT_CHAN * 4 * ELEM_SIZE]; float* buffer_ptr0 = buffer; for (int pp = 0; pp < PER_OUT_CHAN; pp++) { for (int t = 0; t < 4; t++) { for (int ss = 0; ss < ELEM_SIZE; ss++) { *buffer_ptr0 = out_buffer[ss * 4 * PER_OUT_CHAN + pp * 4 + t]; buffer_ptr0++; } } } // end interleave { float tmp_buffer[TILE * TILE]; const float* bias_ptr = NULL; for (int pss = 0; pss < PER_OUT_CHAN; pss++) { int cout_idx = p + pss; float* out_ptr = output + cout_idx * out_hw; if (bias) { bias_ptr = bias + cout_idx; } for (int ii = 0; ii < 4; ii++) { int i_h = idx_h[ii]; int j_w = idx_w[ii]; if ((resi_h == 0 && resi_w == 0) || (resi_h == 0 && (j_w < block_w - 1)) || (resi_w == 0 && (i_h < block_h - 1)) || ((j_w < block_w - 1) && (i_h < block_h - 1))) { trans_output_f43(buffer + ii * 
ELEM_SIZE + pss * 36 * 4, out_ptr + (i_h * TILE * out_w + j_w * TILE), out_w, (const float*)bias_ptr, activation); } // direct use_out_ptr else { int ret_h = TILE - resi_h; if (i_h < block_h - 1) ret_h = TILE; int ret_w = TILE - resi_w; if (j_w < block_w - 1) ret_w = TILE; // tmp_buffer trans_output_f43_ordinary(buffer + ii * ELEM_SIZE + pss * 36 * 4, tmp_buffer, (const float*)bias_ptr); float* out_pointer = out_ptr + (i_h * TILE * out_w + j_w * TILE); for (int hh = 0; hh < ret_h; hh++) { for (int ww = 0; ww < ret_w; ww++) { out_pointer[hh * out_w + ww] = do_activation(tmp_buffer[hh * 4 + ww], activation); } } } // end else, tmp_buff } } } // end transform } #else for (int s = 0; s < ELEM_SIZE; s++) { wino_sgemm_4x12_A17(out_buffer + s * 4 * PER_OUT_CHAN, inp_ptr + s * 4 * cin, ker_ptr + s * PER_OUT_CHAN * cin, cin); } float buffer[PER_OUT_CHAN * 4 * ELEM_SIZE]; float* buffer_ptr0 = buffer; for (int pp = 0; pp < PER_OUT_CHAN; pp++) { for (int t = 0; t < 4; t++) { for (int ss = 0; ss < ELEM_SIZE; ss++) { *buffer_ptr0 = out_buffer[ss * 4 * PER_OUT_CHAN + pp * 4 + t]; buffer_ptr0++; } } } transform_output_f43_4tile(buffer, output, p, i, block_h, block_w, out_h, out_w, resi_h, resi_w, PER_OUT_CHAN, bias, activation); #endif } for (; i < block_hw; i++) { const float* inp_ptr = inp + i * ELEM_SIZE * cin; float out_buffer[PER_OUT_CHAN * ELEM_SIZE]; for (int s = 0; s < ELEM_SIZE; s++) { #ifdef __aarch64__ wino_sgemm_1x16(out_buffer + s * PER_OUT_CHAN, inp_ptr + s * cin, ker_ptr + s * PER_OUT_CHAN * cin, cin); #else wino_sgemm_1x12_A17(out_buffer + s * PER_OUT_CHAN, inp_ptr + s * cin, ker_ptr + s * PER_OUT_CHAN * cin, cin); #endif } // interleave float buffer[PER_OUT_CHAN * ELEM_SIZE]; float* buffer_ptr0 = buffer; for (int pp = 0; pp < PER_OUT_CHAN; pp++) { for (int ss = 0; ss < ELEM_SIZE; ss++) { *buffer_ptr0 = out_buffer[ss * PER_OUT_CHAN + pp]; buffer_ptr0++; } } // end interleave transform_output_f43_1tile(( const float* )buffer, output, p, i, block_h, block_w, out_hw, 
out_w, resi_h, resi_w, PER_OUT_CHAN, bias, activation); // end transform } } } void wino_sgemm_4x4(const float* ker, const float* inp, float* output, const float* bias, int cin, int cout_start, int cout_end, int block_h, int block_w, int out_h, int out_w, int resi_h, int resi_w, int activation, int num_thread, int cpu_affinity) { int flag_outw = 1; if (out_w < 16) flag_outw = 0; #pragma omp parallel for num_threads(num_thread) for (int p = (cout_start & -4); p < (cout_end & -4); p += 4) { int block_hw = block_h * block_w; int out_hw = out_w * out_h; const float* ker_ptr = ker + p * ELEM_SIZE * cin; int i = 0; for (; i < (block_hw & -4); i += 4) { const float* inp_ptr = inp + i * ELEM_SIZE * cin; float out_buffer[4 * 4 * ELEM_SIZE]; #ifdef __aarch64__ int idx_h[4]; int idx_w[4]; idx_h[0] = (i) / block_w; idx_h[1] = (i + 1) / block_w; idx_h[2] = (i + 2) / block_w; idx_h[3] = (i + 3) / block_w; idx_w[0] = (i) % block_w; idx_w[1] = (i + 1) % block_w; idx_w[2] = (i + 2) % block_w; idx_w[3] = (i + 3) % block_w; int wino_out_4_tiles = 0; int mulitplier = 4; if (flag_outw) if ((idx_h[0] == idx_h[3]) && (idx_h[0] < (block_h - 1)) && (idx_w[3] < (block_w - 1))) { wino_out_4_tiles = 1; mulitplier = 1; } for (int s = 0; s < ELEM_SIZE; s++) { { wino_sgemm_4x4_A72(out_buffer + s * 4 * mulitplier, inp_ptr + s * 4 * cin, ker_ptr + s * 4 * cin, cin, wino_out_4_tiles); } } if (wino_out_4_tiles == 1) { float* bias_ptr = NULL; for (int pss = 0; pss < 4; pss++) { int cout_idx = p + pss; float* out_ptr = output + cout_idx * out_hw + idx_h[0] * TILE * out_w + idx_w[0] * TILE; if (bias) { bias_ptr = ( float* )(bias + cout_idx); } float ker00[4] = {2, 4, 8, 0}; tran_out_4(out_buffer + pss * ELEM_SIZE * 4, out_ptr, out_w * sizeof(float), ker00, bias_ptr, activation); } } else { float buffer[4 * 4 * ELEM_SIZE]; float* buffer_ptr0 = buffer; for (int pp = 0; pp < 4; pp++) { for (int t = 0; t < 4; t++) { for (int ss = 0; ss < ELEM_SIZE; ss++) { *buffer_ptr0 = out_buffer[ss * 4 * 4 + pp * 4 + 
t]; buffer_ptr0++; } } } // end interleave // transform_output_f43_4tile((const float*)buffer, output, p, i, block_h, block_w, out_hw, out_w, // resi_h, resi_w, // KER_COUT_UNIT, bias, bias_term); { float tmp_buffer[TILE * TILE]; const float* bias_ptr = NULL; for (int pss = 0; pss < 4; pss++) { int cout_idx = p + pss; float* out_ptr = output + cout_idx * out_hw; if (bias) { bias_ptr = bias + cout_idx; } for (int ii = 0; ii < 4; ii++) { int i_h = idx_h[ii]; int j_w = idx_w[ii]; if ((resi_h == 0 && resi_w == 0) || (resi_h == 0 && (j_w < block_w - 1)) || (resi_w == 0 && (i_h < block_h - 1)) || ((j_w < block_w - 1) && (i_h < block_h - 1))) { trans_output_f43(buffer + ii * ELEM_SIZE + pss * 36 * 4, out_ptr + (i_h * TILE * out_w + j_w * TILE), out_w, ( const float* )bias_ptr, activation); } // direct use_out_ptr else { int ret_h = TILE - resi_h; if (i_h < block_h - 1) ret_h = TILE; int ret_w = TILE - resi_w; if (j_w < block_w - 1) ret_w = TILE; // tmp_buffer trans_output_f43_ordinary(buffer + ii * ELEM_SIZE + pss * 36 * 4, tmp_buffer, ( const float* )bias_ptr); float* out_pointer = out_ptr + (i_h * TILE * out_w + j_w * TILE); for (int hh = 0; hh < ret_h; hh++) { for (int ww = 0; ww < ret_w; ww++) { out_pointer[hh * out_w + ww] = do_activation(tmp_buffer[hh * 4 + ww], activation); } } } // end else, tmp_buff } } } // end transform } #else for (int s = 0; s < ELEM_SIZE; s++) { wino_sgemm_4x4_A17(out_buffer + s * 4 * 4, inp_ptr + s * 4 * cin, ker_ptr + s * 4 * cin, cin); } // interleave float buffer[4 * 4 * ELEM_SIZE]; float* buffer_ptr0 = buffer; for (int pp = 0; pp < 4; pp++) { for (int t = 0; t < 4; t++) { for (int ss = 0; ss < ELEM_SIZE; ss++) { *buffer_ptr0 = out_buffer[ss * 4 * 4 + pp * 4 + t]; buffer_ptr0++; } } } // end interleave transform_output_f43_4tile(buffer, output, p, i, block_h, block_w, out_h, out_w, resi_h, resi_w, 4, bias, activation); #endif } for (; i < block_hw; i++) { const float* inp_ptr = inp + i * ELEM_SIZE * cin; float out_buffer[4 * ELEM_SIZE]; 
for (int s = 0; s < ELEM_SIZE; s++) { #ifdef __aarch64__ wino_sgemm_1x4(out_buffer + s * 4, inp_ptr + s * cin, ker_ptr + s * 4 * cin, cin); #else wino_sgemm_1x4_cpu(out_buffer + s * 4, inp_ptr + s * cin, ker_ptr + s * 4 * cin, cin); #endif } // interleave float buffer[4 * ELEM_SIZE]; float* buffer_ptr0 = buffer; for (int pp = 0; pp < 4; pp++) { for (int ss = 0; ss < ELEM_SIZE; ss++) { *buffer_ptr0 = out_buffer[ss * 4 + pp]; buffer_ptr0++; } } // end interleave transform_output_f43_1tile(( const float* )buffer, output, p, i, block_h, block_w, out_hw, out_w, resi_h, resi_w, 4, bias, activation); // end transform } } int block_hw = block_h * block_w; int out_hw = out_w * out_h; for (int p = (cout_end & -4); p < cout_end; p++) { const float* ker_ptr = ker + p * ELEM_SIZE * cin; int i = 0; for (; i < (block_hw & -4); i += 4) { const float* inp_ptr = inp + i * ELEM_SIZE * cin; float buffer[4 * ELEM_SIZE]; int idx_h[4]; int idx_w[4]; idx_h[0] = (i) / block_w; idx_h[1] = (i + 1) / block_w; idx_h[2] = (i + 2) / block_w; idx_h[3] = (i + 3) / block_w; idx_w[0] = (i) % block_w; idx_w[1] = (i + 1) % block_w; idx_w[2] = (i + 2) % block_w; idx_w[3] = (i + 3) % block_w; // gemm+interleave buffer[4][36] for (int s = 0; s < ELEM_SIZE; s++) { float* inp_ = ( float* )(inp_ptr + s * 4 * cin); float* ker_ = ( float* )(ker_ptr + s * cin); float sum0 = 0; float sum1 = 0; float sum2 = 0; float sum3 = 0; for (int k = 0; k < cin; k++) { sum0 += inp_[k * 4] * ker_[k]; sum1 += inp_[k * 4 + 1] * ker_[k]; sum2 += inp_[k * 4 + 2] * ker_[k]; sum3 += inp_[k * 4 + 3] * ker_[k]; } buffer[s] = sum0; buffer[36 + s] = sum1; buffer[72 + s] = sum2; buffer[108 + s] = sum3; } // trans_out buffer[4][36] float tmp_buffer[TILE * TILE]; const float* bias_ptr = NULL; float* out_ptr = output + p * out_hw; if (bias) { bias_ptr = bias + p; } for (int ii = 0; ii < 4; ii++) { int i_h = idx_h[ii]; int j_w = idx_w[ii]; if ((resi_h == 0 && resi_w == 0) || (resi_h == 0 && (j_w < block_w - 1)) || (resi_w == 0 && (i_h < 
block_h - 1)) || ((j_w < block_w - 1) && (i_h < block_h - 1))) { trans_output_f43(buffer + ii * ELEM_SIZE, out_ptr + (i_h * TILE * out_w + j_w * TILE), out_w, ( const float* )bias_ptr, activation); } // direct use_out_ptr else { int ret_h = TILE - resi_h; if (i_h < block_h - 1) ret_h = TILE; int ret_w = TILE - resi_w; if (j_w < block_w - 1) ret_w = TILE; // tmp_buffer trans_output_f43_ordinary(buffer + ii * ELEM_SIZE, tmp_buffer, ( const float* )bias_ptr); float* out_pointer = out_ptr + (i_h * TILE * out_w + j_w * TILE); for (int hh = 0; hh < ret_h; hh++) { for (int ww = 0; ww < ret_w; ww++) { out_pointer[hh * out_w + ww] = do_activation(tmp_buffer[hh * 4 + ww], activation); } } } // end else, tmp_buff } // end transform } for (; i < block_hw; i++) { const float* inp_ptr = inp + i * ELEM_SIZE * cin; float buffer[ELEM_SIZE]; for (int s = 0; s < ELEM_SIZE; s++) { float* inp_ = ( float* )(inp_ptr + s * cin); float* ker_ = ( float* )(ker_ptr + s * cin); float sum = 0; for (int k = 0; k < cin; k++) { sum += inp_[k] * ker_[k]; } buffer[s] = sum; } // end interleave transform_output_f43_1tile(( const float* )buffer, output, p, i, block_h, block_w, out_hw, out_w, resi_h, resi_w, 1, bias, activation); // end transform } } } static int get_private_mem_size(struct tensor* filter, struct conv_param* param) { int output_c = filter->dims[0]; int input_c = filter->dims[1]; int trans_ker_size = output_c * input_c * ELEM_SIZE * sizeof(float); return trans_ker_size + 128; // caution } int wino_conv_hcl_prerun(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param) { int output_c = filter_tensor->dims[0]; int input_c = filter_tensor->dims[1]; int mem_size = get_private_mem_size(filter_tensor, param); float* trans_mem = ( float* )sys_malloc(mem_size); if (!priv_info->external_interleave_mem) { void* mem = sys_malloc(mem_size); priv_info->interleave_buffer = mem; 
priv_info->interleave_buffer_size = mem_size; } transform_kernel_f43_tile(filter_tensor, trans_mem); interleave_kernel(trans_mem, ( float* )priv_info->interleave_buffer, output_c, input_c); sys_free(trans_mem); return 0; } int wino_conv_hcl_postrun(struct conv_priv_info* priv_info) { if (!priv_info->external_interleave_mem && priv_info->interleave_buffer != NULL) { sys_free(priv_info->interleave_buffer); priv_info->interleave_buffer = NULL; } return 0; } int wino_conv_hcl_run(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* bias_tensor, struct tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param, int num_thread, int cpu_affinity) { /* param */ int kernel_h = param->kernel_h; int kernel_w = param->kernel_w; int stride_h = param->stride_h; int stride_w = param->stride_w; int dilation_h = param->dilation_h; int dilation_w = param->dilation_w; int pad_h0 = param->pad_h0; int pad_w0 = param->pad_w0; int act_type = param->activation; int batch = input_tensor->dims[0]; int in_c = input_tensor->dims[1]; int in_h = input_tensor->dims[2]; int in_w = input_tensor->dims[3]; int input_size = in_c * in_h * in_w; int kernel_size = in_c * kernel_h * kernel_w; int out_c = output_tensor->dims[1]; int out_h = output_tensor->dims[2]; int out_w = output_tensor->dims[3]; int out_hw = out_h * out_w; int output_size = out_c * out_h * out_w; int out_c_align = ((out_c + 3) & -4); /* wino param */ int block_h = (out_h + TILE - 1) / TILE; int block_w = (out_w + TILE - 1) / TILE; int block_hw = block_h * block_w; int padded_in_h = block_h * TILE + 2; int padded_in_w = block_w * TILE + 2; int padded_in_hw = padded_in_h * padded_in_w; /* buffer addr */ float* input_buf = ( float* )input_tensor->data; float* output_buf = ( float* )output_tensor->data; float* biases_buf = NULL; if (bias_tensor != NULL) biases_buf = ( float* )bias_tensor->data; float* col_buf = ( float* )priv_info->im2col_buffer; float* interleave_buf = ( float* 
)priv_info->interleave_buffer; float* input_padd_buf = ( float* )sys_malloc(sizeof(float) * padded_in_hw * in_c + 128); float* trans_input_buf = ( float* )sys_malloc(sizeof(float) * block_hw * in_c * ELEM_SIZE + 128); int nn_out_c = out_c / PER_OUT_CHAN * PER_OUT_CHAN; int nn_block = block_hw >> 2; int resi_block = nn_block << 2; int resi_h = block_h * TILE - out_h; int resi_w = block_w * TILE - out_w; for (int n = 0; n < batch; n++) { float* input = input_buf + n * input_size; float* output = output_buf + n * output_size; /* PAD input */ pad_input1(input, input_padd_buf, in_c, in_h, in_w, padded_in_h, padded_in_w, pad_h0, pad_w0); /* trans input */ tran_input_4block(input_padd_buf, trans_input_buf, in_c, block_h, block_w, padded_in_h, padded_in_w, num_thread); if (resi_block != block_hw) { tran_input_resi_block(input_padd_buf, trans_input_buf, in_c, nn_block, resi_block, block_hw, block_w, padded_in_hw, padded_in_w); } /* sdot */ wino_sgemm_set(interleave_buf, trans_input_buf, output, biases_buf, in_c, nn_out_c, block_h, block_w, out_h, out_w, resi_h, resi_w, act_type, num_thread, cpu_affinity); if (nn_out_c != out_c) { wino_sgemm_4x4(interleave_buf, trans_input_buf, output, biases_buf, in_c, nn_out_c, out_c, block_h, block_w, out_h, out_w, resi_h, resi_w, act_type, num_thread, cpu_affinity); } } sys_free(input_padd_buf); sys_free(trans_input_buf); return 0; }
parallelFileOp.h
#pragma once
#include<string>
#include<sstream>
#include<fstream>
#include<unordered_map>
#include<unordered_set>
#include<list>
#include<vector>
#include<functional>
// needed for posix io
#include<cstdio>
#include <sys/types.h>
#include <sys/stat.h>
#include<omp.h>

using std::string;
using std::stringstream;
using std::fstream;
using std::ios;
using std::unordered_map;
using std::unordered_set;
using std::list;
using std::pair;
using std::vector;
using std::function;

// Default line filter: accept every line.
// `inline` is required here: this header (#pragma once) may be included by
// several translation units, and a non-inline function definition would
// violate the one-definition rule (duplicate-symbol link error).
inline bool defFilter(const string &){ return true; }

/*
 * This function will map each line from the file path and
 * map it using the provided function. The result is a list
 * of type T where each line corresponds to an entry in the list.
 * Note: Result is unordered.
 *
 * filePath - the path to parse in parallel. Each line is a record.
 * result - returned by reference. New results are appended to this list.
 * mapper - each line of the input is run through this
 * filter - determines whether to process each line. Default accepts all.
 *
 * Strategy: the file is split into roughly equal byte ranges, one per OpenMP
 * thread. Each thread seeks to its range start, skips the (partial) line it
 * landed in — the previous thread owns it — publishes its true start offset,
 * then maps lines until it crosses into the next thread's range.
 */
template<class T>
void fastProcFile(const string& filePath, list<T>& result,
                  function<T(const string&)> mapper,
                  function<bool(const string&)> filter = defFilter)
{
    // get properties of abstract path; on failure st.st_size would be
    // uninitialized garbage, so bail out early instead of reading it
    struct stat st;
    if (stat(filePath.c_str(), &st) != 0)
        return;
    size_t totalFileSize = st.st_size;
    vector<size_t> fileStarts;

#pragma omp parallel
    {
        unsigned int tid = omp_get_thread_num();
        unsigned int totalThreadNum = omp_get_num_threads();
        size_t bytesPerThread = totalFileSize / totalThreadNum;

#pragma omp single
        {
            // slot [totalThreadNum] is a sentinel so every thread has an upper bound
            fileStarts = vector<size_t>(totalThreadNum + 1, 0);
            fileStarts[totalThreadNum] = totalFileSize;
        }
        // (the single construct already ends with an implicit barrier)
#pragma omp barrier

        // each thread puts its start position
        fstream localFile(filePath, ios::in | ios::binary);
        localFile.seekg(tid * bytesPerThread);
        string localLine;
        if (tid > 0)
        {
            // jump to next newline: the line we landed inside belongs to tid-1
            getline(localFile, localLine);
        }
        // tellg() reports -1 once the stream has failed (e.g. the seek/skip ran
        // past EOF); storing that into size_t would wrap to SIZE_MAX, so clamp
        // to the end of file instead.
        std::streamoff startPos = localFile.tellg();
        fileStarts[tid] = startPos < 0 ? totalFileSize : static_cast<size_t>(startPos);
#pragma omp barrier

        list<T> locRes;
        // while we are still inside our own section
        while (localFile)
        {
            std::streamoff cur = localFile.tellg();
            if (cur < 0 || static_cast<size_t>(cur) >= fileStarts[tid + 1])
                break;
            // only hand a line to filter/mapper if something was actually read;
            // the old code could feed a spurious empty line on a failed getline
            if (!getline(localFile, localLine))
                break;
            if (filter(localLine))
            {
                locRes.emplace_back(mapper(localLine));
            }
        }
        localFile.close();

#pragma omp critical
        {
            // splice is O(1) and keeps each thread's batch contiguous
            result.splice(result.end(), locRes);
        }
    }
}
GeneralMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H

namespace Eigen {

namespace internal {

template<typename LhsScalar_, typename RhsScalar_> class level3_blocking;

/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
  int ResInnerStride>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor,ResInnerStride>
{
  typedef gebp_traits<RhsScalar,LhsScalar> Traits;

  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  static EIGEN_STRONG_INLINE void run(
    Index rows, Index cols, Index depth,
    const LhsScalar* lhs, Index lhsStride,
    const RhsScalar* rhs, Index rhsStride,
    ResScalar* res, Index resIncr, Index resStride,
    ResScalar alpha,
    level3_blocking<RhsScalar,LhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    // transpose the product such that the result is column major:
    // C^T = (A*B)^T = B^T * A^T, so delegate to the ColMajor specialization
    // with lhs/rhs swapped and their storage orders flipped.
    general_matrix_matrix_product<Index,
      RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
      LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
      ColMajor,ResInnerStride>
    ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resIncr,resStride,alpha,blocking,info);
  }
};

/* Specialization for a col-major destination matrix
 *  => Blocking algorithm following Goto's paper */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
  int ResInnerStride>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor,ResInnerStride>
{
  typedef gebp_traits<LhsScalar,RhsScalar> Traits;

  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  static void run(Index rows, Index cols, Index depth,
    const LhsScalar* _lhs, Index lhsStride,
    const RhsScalar* _rhs, Index rhsStride,
    ResScalar* _res, Index resIncr, Index resStride,
    ResScalar alpha,
    level3_blocking<LhsScalar,RhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
    typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
    typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor,Unaligned,ResInnerStride> ResMapper;
    LhsMapper lhs(_lhs, lhsStride);
    RhsMapper rhs(_rhs, rhsStride);
    ResMapper res(_res, resStride, resIncr);

    Index kc = blocking.kc();                   // cache block size along the K direction
    Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction
    Index nc = (std::min)(cols,blocking.nc());  // cache block size along the N direction

    gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder> pack_lhs;
    gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
    gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;

#ifdef EIGEN_HAS_OPENMP
    if(info)
    {
      // this is the parallel version!
      // NOTE: run() is entered from inside an existing omp parallel region;
      // `info` carries one slot per thread (lhs_start/lhs_length plus the
      // users/sync fields used for hand-rolled synchronization below).
      int tid = omp_get_thread_num();
      int threads = omp_get_num_threads();

      LhsScalar* blockA = blocking.blockA();
      eigen_internal_assert(blockA!=0);

      std::size_t sizeB = kc*nc;
      ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);

      // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
      for(Index k=0; k<depth; k+=kc)
      {
        const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'

        // In order to reduce the chance that a thread has to wait for the other,
        // let's start by packing B'.
        pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);

        // Pack A_k to A' in a parallel fashion:
        // each thread packs the sub block A_k,i to A'_i where i is the thread id.

        // However, before copying to A'_i, we have to make sure that no other thread is still using it,
        // i.e., we test that info[tid].users equals 0.
        // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
        while(info[tid].users!=0) {}
        info[tid].users = threads;

        pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);

        // Notify the other threads that the part A'_i is ready to go.
        info[tid].sync = k;

        // Computes C_i += A' * B' per A'_i
        for(int shift=0; shift<threads; ++shift)
        {
          // Rotate the visiting order by tid so each thread starts on its own
          // block and contention on info[i].sync is staggered.
          int i = (tid+shift)%threads;

          // At this point we have to make sure that A'_i has been updated by the thread i,
          // we use testAndSetOrdered to mimic a volatile access.
          // However, no need to wait for the B' part which has been updated by the current thread!
          if (shift>0) {
            while(info[i].sync!=k) {
            }
          }

          gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
        }

        // Then keep going as usual with the remaining B'
        for(Index j=nc; j<cols; j+=nc)
        {
          const Index actual_nc = (std::min)(j+nc,cols)-j;

          // pack B_k,j to B'
          pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);

          // C_j += A' * B'
          gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);
        }

        // Release all the sub blocks A'_i of A' for the current thread,
        // i.e., we simply decrement the number of users by 1
        for(Index i=0; i<threads; ++i)
#if !EIGEN_HAS_CXX11_ATOMIC
          #pragma omp atomic
#endif
          info[i].users -= 1;
      }
    }
    else
#endif // EIGEN_HAS_OPENMP
    {
      EIGEN_UNUSED_VARIABLE(info);

      // this is the sequential version!
      std::size_t sizeA = kc*mc;
      std::size_t sizeB = kc*nc;

      ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
      ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());

      const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols;

      // For each horizontal panel of the rhs, and corresponding panel of the lhs...
      for(Index i2=0; i2<rows; i2+=mc)
      {
        const Index actual_mc = (std::min)(i2+mc,rows)-i2;

        for(Index k2=0; k2<depth; k2+=kc)
        {
          const Index actual_kc = (std::min)(k2+kc,depth)-k2;

          // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
          // => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
          // Note that this panel will be read as many times as the number of blocks in the rhs's
          // horizontal panel which is, in practice, a very low number.
          pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);

          // For each kc x nc block of the rhs's horizontal panel...
          for(Index j2=0; j2<cols; j2+=nc)
          {
            const Index actual_nc = (std::min)(j2+nc,cols)-j2;

            // We pack the rhs's block into a sequential chunk of memory (L2 caching)
            // Note that this block will be read a very high number of times, which is equal to the number of
            // micro horizontal panel of the large rhs's panel (e.g., rows/12 times).
            if((!pack_rhs_once) || i2==0)
              pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);

            // Everything is packed, we can now call the panel * block kernel:
            gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
          }
        }
      }
    }
  }
};

/*********************************************************************************
*  Specialization of generic_product_impl for "large" GEMM, i.e.,
*  implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/

// Functor object binding lhs/rhs/dest/alpha/blocking so the parallelizer can
// invoke row-sliced sub-products through operator().
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
  {}

  void initParallelSession(Index num_threads) const
  {
    m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
    m_blocking.allocateA();
  }

  // cols==-1 means "all columns of the rhs"
  void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
  {
    if(cols==-1)
      cols = m_rhs.cols();

    Gemm::run(rows, cols, m_lhs.cols(),
              &m_lhs.coeffRef(row,0), m_lhs.outerStride(),
              &m_rhs.coeffRef(0,col), m_rhs.outerStride(),
              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.innerStride(), m_dest.outerStride(),
              m_actualAlpha, m_blocking, info);
  }

  typedef typename Gemm::Traits Traits;

  protected:
    const Lhs& m_lhs;
    const Rhs& m_rhs;
    Dest& m_dest;
    Scalar m_actualAlpha;
    BlockingType& m_blocking;
};

template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;

// Base class holding the block pointers and the mc/nc/kc blocking sizes shared
// by both the compile-time-sized and runtime-sized blocking implementations.
template<typename LhsScalar_, typename RhsScalar_>
class level3_blocking
{
    typedef LhsScalar_ LhsScalar;
    typedef RhsScalar_ RhsScalar;

  protected:
    LhsScalar* m_blockA;
    RhsScalar* m_blockB;

    Index m_mc;
    Index m_nc;
    Index m_kc;

  public:

    level3_blocking()
      : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)
    {}

    inline Index mc() const { return m_mc; }
    inline Index nc() const { return m_nc; }
    inline Index kc() const { return m_kc; }

    inline LhsScalar* blockA() { return m_blockA; }
    inline RhsScalar* blockB() { return m_blockB; }
};

// Blocking for problems whose sizes are all known at compile time: the packing
// buffers are statically sized members (aligned in-place when possible).
template<int StorageOrder, typename LhsScalar_, typename RhsScalar_, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,LhsScalar_,RhsScalar_,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,RhsScalar_,LhsScalar_>::type,
      typename conditional<StorageOrder==RowMajor,LhsScalar_,RhsScalar_>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor,
      ActualRows = Transpose ? MaxCols : MaxRows,
      ActualCols = Transpose ? MaxRows : MaxCols
    };
    typedef typename conditional<Transpose,RhsScalar_,LhsScalar_>::type LhsScalar;
    typedef typename conditional<Transpose,LhsScalar_,RhsScalar_>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
    enum {
      SizeA = ActualRows * MaxDepth,
      SizeB = ActualCols * MaxDepth
    };

#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
    EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA];
    EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB];
#else
    // Not enough guaranteed static alignment: over-allocate raw bytes and
    // align the pointers by hand in the constructor.
    EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
    EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
#endif

  public:

    gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
    {
      this->m_mc = ActualRows;
      this->m_nc = ActualCols;
      this->m_kc = MaxDepth;
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
      this->m_blockA = m_staticA;
      this->m_blockB = m_staticB;
#else
      this->m_blockA = reinterpret_cast<LhsScalar*>((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
      this->m_blockB = reinterpret_cast<RhsScalar*>((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
#endif
    }

    void initParallel(Index, Index, Index, Index)
    {}

    inline void allocateA() {}
    inline void allocateB() {}
    inline void allocateAll() {}
};

// Blocking for runtime-sized problems: block sizes are computed from the cache
// model and the packing buffers are heap-allocated on demand.
template<int StorageOrder, typename LhsScalar_, typename RhsScalar_, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,LhsScalar_,RhsScalar_,MaxRows, MaxCols, MaxDepth, KcFactor, false>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,RhsScalar_,LhsScalar_>::type,
      typename conditional<StorageOrder==RowMajor,LhsScalar_,RhsScalar_>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor
    };
    typedef typename conditional<Transpose,RhsScalar_,LhsScalar_>::type LhsScalar;
    typedef typename conditional<Transpose,LhsScalar_,RhsScalar_>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;

    Index m_sizeA;
    Index m_sizeB;

  public:

    gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      if(l3_blocking)
      {
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
      }
      else  // no l3 blocking
      {
        Index n = this->m_nc;
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n, num_threads);
      }

      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    void initParallel(Index rows, Index cols, Index depth, Index num_threads)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);
      Index m = this->m_mc;
      computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    void allocateA()
    {
      if(this->m_blockA==0)
        this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
    }

    void allocateB()
    {
      if(this->m_blockB==0)
        this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
    }

    void allocateAll()
    {
      allocateA();
      allocateB();
    }

    ~gemm_blocking_space()
    {
      aligned_delete(this->m_blockA, m_sizeA);
      aligned_delete(this->m_blockB, m_sizeB);
    }
};

} // end namespace internal

namespace internal {

template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;
  typedef typename Lhs::Scalar LhsScalar;
  typedef typename Rhs::Scalar RhsScalar;

  typedef internal::blas_traits<Lhs> LhsBlasTraits;
  typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
  typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;

  typedef internal::blas_traits<Rhs> RhsBlasTraits;
  typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
  typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;

  enum {
    MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
  };

  typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;

  template<typename Dst>
  static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    // See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=404 for a discussion and helper program
    // to determine the following heuristic.
    // EIGEN_GEMM_TO_COEFFBASED_THRESHOLD is typically defined to 20 in GeneralProduct.h,
    // unless it has been specialized by the user or for a given architecture.
    // Note that the condition rhs.rows()>0 was required because lazy product is (was?) not happy with empty inputs.
    // I'm not sure it is still required.
if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0) lazyproduct::eval_dynamic(dst, lhs, rhs, internal::assign_op<typename Dst::Scalar,Scalar>()); else { dst.setZero(); scaleAndAddTo(dst, lhs, rhs, Scalar(1)); } } template<typename Dst> static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0) lazyproduct::eval_dynamic(dst, lhs, rhs, internal::add_assign_op<typename Dst::Scalar,Scalar>()); else scaleAndAddTo(dst,lhs, rhs, Scalar(1)); } template<typename Dst> static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0) lazyproduct::eval_dynamic(dst, lhs, rhs, internal::sub_assign_op<typename Dst::Scalar,Scalar>()); else scaleAndAddTo(dst, lhs, rhs, Scalar(-1)); } template<typename Dest> static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha) { eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols()); if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0) return; if (dst.cols() == 1) { // Fallback to GEMV if either the lhs or rhs is a runtime vector typename Dest::ColXpr dst_vec(dst.col(0)); return internal::generic_product_impl<Lhs,typename Rhs::ConstColXpr,DenseShape,DenseShape,GemvProduct> ::scaleAndAddTo(dst_vec, a_lhs, a_rhs.col(0), alpha); } else if (dst.rows() == 1) { // Fallback to GEMV if either the lhs or rhs is a runtime vector typename Dest::RowXpr dst_vec(dst.row(0)); return internal::generic_product_impl<typename Lhs::ConstRowXpr,Rhs,DenseShape,DenseShape,GemvProduct> ::scaleAndAddTo(dst_vec, a_lhs.row(0), a_rhs, alpha); } typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs); typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs); Scalar actualAlpha = combine_scalar_factors(alpha, a_lhs, a_rhs); 
typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar, Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType; typedef internal::gemm_functor< Scalar, Index, internal::general_matrix_matrix_product< Index, LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate), RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate), (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor, Dest::InnerStrideAtCompileTime>, ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor; BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true); internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)> (GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(), Dest::Flags&RowMajorBit); } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_GENERAL_MATRIX_MATRIX_H
adListShared.h
#ifndef ADLISTSHARED_H_
#define ADLISTSHARED_H_

#include <iostream>
#include <thread>
#include <stdlib.h>
#include <mutex>
#include "omp.h"
#include <cassert>
// #include<memory>
#include "x86_full_empty.h"
#include "stinger_atomics.h"
#include "abstract_data_struc.h"
#include "print.h"
#include "common.h"

bool compare_and_swap(bool &x, const bool &old_val, const bool &new_val);

// T can be either node or nodeweight
//
// Shared-memory adjacency-list graph: one std::vector of neighbors per
// vertex, protected by one std::mutex per vertex (separate locks for the
// in- and out-lists). Deleted edges are tombstoned with node id -1 and the
// slot is reused by later insertions.
template <typename T>
class adListShared: public dataStruc {
  private:
    void updateExistingEdge(NodeID self, unsigned int index, T new_neighbor, bool in_neighbor);
    void search_and_insert_edge(const Edge& e, bool source, std::vector<T> &neighborList, bool in_neighbor);
    void updateForExistingVertex(const Edge& e, bool source);
    void processMetaData(const Edge& e, bool source);
    // One mutex per vertex; index k guards {in,out}_neighbors[k].
    std::vector<std::unique_ptr<std::mutex>> in_mutex, out_mutex;
    int64_t num_nodes_initialize;   // capacity reserved at construction
  public:
    std::vector<std::vector<T>> out_neighbors;
    std::vector<std::vector<T>> in_neighbors;
    adListShared(bool w, bool d, int64_t _num_nodes, int _num_threads);
    void update(const EdgeList& el) override;
    void print() override;
    int64_t in_degree(NodeID n) override;
    int64_t out_degree(NodeID n) override;
};

// Pre-sizes all per-vertex state for _num_nodes vertices and (optionally)
// pins the OpenMP thread count. w = weighted, d = directed.
template <typename T>
adListShared<T>::adListShared(bool w, bool d, int64_t _num_nodes, int num_threads)
    : dataStruc(w, d), num_nodes_initialize(_num_nodes) {
#ifdef _OPENMP
  if(num_threads > 0){
    omp_set_num_threads(num_threads);
  }
#endif
  // initialize 1) property 2) affected 3) vertices vectors 4) mutex
  property.resize(num_nodes_initialize, -1);
  affected.resize(num_nodes_initialize);
  affected.fill(false);
  out_neighbors.resize(num_nodes_initialize);
  in_neighbors.resize(num_nodes_initialize);
  // Malloc for mutex.
  out_mutex.resize(num_nodes_initialize);
  in_mutex.resize(num_nodes_initialize);
  for (unsigned int k = 0; k< num_nodes_initialize; k++){
    out_mutex[k].reset(new std::mutex());
    in_mutex[k].reset(new std::mutex());
  }
}

// Marks the endpoint of e named by `source` as affected (lock-free CAS) and
// bumps the global node/edge counters atomically.
// NOTE(review): num_edges is incremented even when the batch entry is a
// deletion — confirm this counting is intended.
template <typename T>
void adListShared<T>::processMetaData(const Edge& e, bool source)
{
  bool exists;
  if(source) exists = e.sourceExists;
  else exists = e.destExists;
  // using CAS operations implemented in GAP
  if(source){
    NodeID v = e.source;
    bool aff = affected[v];
    if(!aff){
      compare_and_swap(affected[v], aff, true);
    }
  }
  else{
    NodeID v = e.destination;
    bool aff = affected[v];
    if(!aff){
      compare_and_swap(affected[v], aff, true);
    }
  }
  if(exists){
    stinger_int64_fetch_add(&num_edges, 1);
  }
  else{
    // Endpoint seen for the first time: count the node as well.
    stinger_int64_fetch_add(&num_nodes, 1);
    stinger_int64_fetch_add(&num_edges, 1);
  }
}

// Applies one edge (insert or tombstone-delete) to the adjacency list of the
// endpoint selected by `source`, holding only that vertex's mutex.
// Inserts overwrite an existing entry, then reuse the first tombstone (-1)
// slot, and only append when neither exists. Deletes tombstone in place.
template <typename T>
void adListShared<T>::updateForExistingVertex(const Edge& e, bool source)
{
  NodeID index;
  if(source) index = e.source;
  else index = e.destination;
  if (source || (!source && !directed)) {
    // Out-list of the source (or either endpoint when undirected).
    NodeID dest;
    if(source) dest = e.destination;
    else dest = e.source;
    // guard the mutex
    std::lock_guard<std::mutex> guard(*out_mutex[index]);
    NodeID temp;
    if(!e.isDelete){ //insTot++;
      //insert
      //search for the edge first in out_neighbors
      int64_t foundEmptySlot = -1;
      for (unsigned int i = 0; i < out_neighbors[index].size(); i++) {
        temp = out_neighbors[index][i].getNodeID();
        if (temp == dest) {
          // Edge already present: just refresh its weight.
          out_neighbors[index][i].setInfo(dest, e.weight);
          return;
        }
        // Mark a deleted one.
        if (temp == -1 && foundEmptySlot == -1){
          foundEmptySlot = i;
        }
      }
      //insSucc++;
      if (foundEmptySlot == -1) {
        T neighbor;
        neighbor.setInfo(dest, e.weight);
        out_neighbors[index].push_back(neighbor);
        return;
      }
      // Go into the slot
      out_neighbors[index][foundEmptySlot].setInfo(dest, e.weight);
      return;
    }
    else{ //delete
      //delTot++;
      for (unsigned int i = 0; i < out_neighbors[index].size(); i++) {
        temp = out_neighbors[index][i].getNodeID();
        if (temp == dest) {
          //found, mark as delete
          //delSucc++;
          out_neighbors[index][i].setInfo(-1, -1);
          return;
        }
      }
    }
  }
  else if (!source && directed) { // in_neighbors
    NodeID temp;
    // guard the mutex
    std::lock_guard<std::mutex> guard(*in_mutex[index]);
    if(!e.isDelete){ //insTot++;
      //insert
      // search for the edge first in in_neighbors
      int64_t foundEmptySlot = -1;
      for(unsigned int i = 0; i < in_neighbors[index].size(); i++){
        temp = in_neighbors[index][i].getNodeID();
        if(temp == e.source){
          // Edge already present: just refresh its weight.
          in_neighbors[index][i].setInfo(e.source, e.weight);
          return;
        }
        // Mark a deleted one.
        if (temp == -1 && foundEmptySlot == -1) {
          foundEmptySlot = i;
        }
      }
      //insSucc++;
      if (foundEmptySlot == -1) {
        T neighbor;
        neighbor.setInfo(e.source, e.weight);
        in_neighbors[index].push_back(neighbor);
        return;
      }
      // Go into the slot
      in_neighbors[index][foundEmptySlot].setInfo(e.source, e.weight);
      return;
    }
    else{ //delete
      //delTot++;
      for(unsigned int i = 0; i < in_neighbors[index].size(); i++){
        temp = in_neighbors[index][i].getNodeID();
        if(temp == e.source){
          //found, mark as deleted
          //delSucc++;
          in_neighbors[index][i].setInfo(-1, -1);
          return;
        }
      }
    }
  }
}

// Applies a whole batch of edges in parallel: each iteration touches both
// endpoints of one edge (metadata first, then the adjacency lists).
// The LIKWID branch is the same loop wrapped in profiling markers.
template <typename T>
void adListShared<T>::update(const EdgeList& el)
{
#ifndef LIKWID_PERFMON
# pragma omp parallel for
  // for(auto it=el.begin(); it!=el.end(); it++){
  for (unsigned int k = 0; k < el.size(); k ++) {
    // examine source vertex
    //bool exists = vertexExists(*it, true);
    //if(!exists) updateForNewVertex(*it, true);
    processMetaData(el[k], true);
    updateForExistingVertex(el[k], true);
    // examine destination vertex
    //bool exists1 = vertexExists(*it, false);
    //if(!exists1) updateForNewVertex(*it, false);
    processMetaData(el[k], false);
    updateForExistingVertex(el[k], false);
  }
#else
# pragma omp parallel
  {
    LIKWID_MARKER_START("upd");
    // for(auto it=el.begin(); it!=el.end(); it++){
    #pragma omp for
    for (unsigned int k = 0; k < el.size(); k ++) {
      // examine source vertex
      //bool exists = vertexExists(*it, true);
      //if(!exists) updateForNewVertex(*it, true);
      processMetaData(el[k], true);
      updateForExistingVertex(el[k], true);
      // examine destination vertex
      //bool exists1 = vertexExists(*it, false);
      //if(!exists1) updateForNewVertex(*it, false);
      processMetaData(el[k], false);
      updateForExistingVertex(el[k], false);
    }
    LIKWID_MARKER_STOP("upd");
  }
#endif
}

// In-degree of n (out-degree when undirected, since only out-lists are kept).
// NOTE(review): size() also counts tombstoned (-1) slots left by deletions —
// verify this is the intended degree semantics.
template <typename T>
int64_t adListShared<T>::in_degree(NodeID n)
{
  if(directed) {
    std::lock_guard<std::mutex> guard(*in_mutex[n]);
    return in_neighbors[n].size();
  }
  else {
    std::lock_guard<std::mutex> guard(*out_mutex[n]);
    return out_neighbors[n].size();
  }
}

// Out-degree of n. Same tombstone-counting caveat as in_degree.
template <typename T>
int64_t adListShared<T>::out_degree(NodeID n)
{
  std::lock_guard<std::mutex> guard(*out_mutex[n]);
  return out_neighbors[n].size();
}

// Reporting is currently disabled; the insert/delete counters it printed
// were removed from the hot path.
template <typename T>
void adListShared<T>::print()
{
  // std::cout << "Inserts--------------------" << std::endl;
  // std::cout << "    Total: " << insTot << std::endl;
  // std::cout << "    Succ : " << insSucc << std::endl;
  // std::cout << "    Fail : " << insTot - insSucc << std::endl;
  // std::cout << std::endl;
  //
  // std::cout << "Deletes--------------------" << std::endl;
  // std::cout << "    Total: " << delTot << std::endl;
  // std::cout << "    Succ : " << delSucc << std::endl;
  // std::cout << "    Fail : " << delTot - delSucc << std::endl;
  // std::cout << std::endl;
  //
  // std::cout << "Final number of edges: " << insSucc - delSucc << std::endl;
}

//template <typename T>
//void adListShared<T>::print()
//{
//    std::cout << " numNodes: " << num_nodes <<
//            " numEdges: " << num_edges <<
//            " weighted: " << weighted <<
//            " directed: " << directed <<
//            std::endl;
//
//    /*cout << "Property: "; printVector(property);
//    cout << "out_neighbors: " << endl; printVecOfVecOfNodes(out_neighbors);
//    cout << "in_neighbors: " << endl; printVecOfVecOfNodes(in_neighbors);*/
//}

#endif  // ADLISTSHARED_H_

// #ifndef ADLIST_H_
// #define ADLIST_H_
// #include <iostream>
// #include <iomanip>
// #include <stdlib.h>
// #include "abstract_data_struc.h"
// #include "print.h"
// #include <cassert>
// // #include<memory>
// #include "x86_full_empty.h"
// #include "stinger_atomics.h"
// using namespace std;
// bool compare_and_swap(bool &x, const bool &old_val, const bool &new_val);
// //class adListPart<T>;
// // T can be either node or nodeweight
// template <typename T>
// class adList: public dataStruc {
//     // friend class adListPart<T>;
//     private:
//       int64_t num_nodes_initialize; // The max amount of nodes we would initialize.
// void processMetaData(const Edge& e, bool source); // void updateForVertex(const Edge& e, bool source); // void updateExistingEdge(NodeID self, unsigned int index, T new_neighbor, bool in_neighbor); // void search_and_insert_edge(const Edge& e, bool source, std::vector<T> &neighborList, bool in_neighbor); // // vector<bool> in_markers; // // vector<bool> out_markers; // bool* in_markers; // bool* out_markers; // public: // std::vector<std::vector<T>> out_neighbors; // std::vector<std::vector<T>> in_neighbors; // // vector<T>* out_neighbors, in_neighbors; // adList(bool w, bool d, int64_t _num_nodes); // adList(bool w, bool d); // ~adList(); // void update(const EdgeList& el) override; // void print() override; // int64_t in_degree(NodeID n) override; // int64_t out_degree(NodeID n) override; // }; // template <typename T> // adList<T>::adList(bool w, bool d, int64_t _num_nodes) // : dataStruc(w, d){ // num_nodes_initialize = _num_nodes; // // initialize 1) property 2) affected 3) vertices vectors 4) markers // property.resize(num_nodes_initialize, -1); // affected.resize(num_nodes_initialize); affected.fill(false); // out_neighbors.resize(num_nodes_initialize); // in_neighbors.resize(num_nodes_initialize); // // out_neighbors = new vector<T>[num_nodes_initialize]; // // in_neighbors = new vector<T>[num_nodes_initialize]; // in_markers = new bool[num_nodes_initialize]; // out_markers = new bool[num_nodes_initialize]; // //in_markers.resize(num_nodes_initialize); // //out_markers.resize(num_nodes_initialize); // for (unsigned int k = 0; k < num_nodes_initialize; k++){ // in_markers[k] = 1; // out_markers[k] = 1; // } // } // template <typename T> // adList<T>::~adList(){ // delete []in_markers; // delete []out_markers; // // delete []out_neighbors; // // delete []in_neighbors; // } // template <typename T> // void adList<T>::processMetaData(const Edge& e, bool source) // { // bool exists; // if(source) exists = e.sourceExists; // else exists = e.destExists; // // 
using CAS operations implemented in GAP // if(source){ // NodeID v = e.source; // bool aff = affected[v]; // if(!aff){ // compare_and_swap(affected[v], aff, true); // } // } // else{ // NodeID v = e.destination; // bool aff = affected[v]; // if(!aff){ // compare_and_swap(affected[v], aff, true); // } // } // if(exists){ // stinger_int64_fetch_add(&num_edges, 1); // } // else{ // stinger_int64_fetch_add(&num_nodes, 1); // stinger_int64_fetch_add(&num_edges, 1); // } // } // template <typename T> // void adList<T>::updateExistingEdge(NodeID self, unsigned int index, T new_neighbor, bool in_neighbor){ // if (weighted){ // if (in_neighbor) // in_neighbors[self][index].setInfo(new_neighbor.getNodeID(), new_neighbor.getWeight()); // else // out_neighbors[self][index].setInfo(new_neighbor.getNodeID(), new_neighbor.getWeight()); // } // } // template <typename T> // void adList<T>::search_and_insert_edge(const Edge& e, bool source, std::vector<T> &neighborList, bool in_neighbor){ // //create a new T // T neighbor; // NodeID selfID; // if (source) { // neighbor.setInfo(e.destination, e.weight); // selfID = e.source; // } // else { // neighbor.setInfo(e.source, e.weight); // selfID = e.destination; // } // // 1. search whether the edge exists // bool* markerPtr; // pointer to the marker // if (selfID >= num_nodes_initialize) cout << "wrong ID" <<endl; // if (in_neighbor){ // markerPtr = in_markers + selfID; // } // else{ // markerPtr = out_markers + selfID; // } // // start from the beginning // bool _adr = readfe_bool(markerPtr); // unsigned int len = neighborList.size(); // // writexf_bool(markerPtr, _adr); // for ( unsigned int k = 0; k < len; k++) { // if (neighborList[k].getNodeID() == neighbor.getNodeID()) { // // found the existing edge // // bool _adr = readfe_bool(markerPtr); // updateExistingEdge(selfID, k, neighbor, in_neighbor); // writexf_bool(markerPtr, _adr); // return ; // } // } // // 2. 
Not found, try to insert // // _adr = readfe_bool(markerPtr); // // for (unsigned int k = 0; k < neighborList.size(); k++ ){ // // if (neighborList[k].getNodeID() == neighbor.getNodeID()) { // // updateExistingEdge(selfID, k, neighbor, in_neighbor); // // writexf_bool(markerPtr, _adr); // // return; // // } // // else if (neighborList[k].getNodeID() == -1) { // // updateExistingEdge(selfID, k, neighbor, in_neighbor); // // writexf_bool(markerPtr, _adr); // // return; // // } // // } // neighborList.push_back(neighbor); // writexf_bool(markerPtr, _adr); // return; // } // template <typename T> // void adList<T>::updateForVertex(const Edge& e, bool source){ // NodeID index; // std::vector<T> neighbors; // if(source || !directed) { // index = e.source; // //cout << index << endl; // neighbors = out_neighbors[index]; // } // else { // index = e.destination; // neighbors = in_neighbors[index]; // } // if(source || !directed){ // search_and_insert_edge(e, source, out_neighbors[index], false); // } // else if(!source && directed){ // search_and_insert_edge(e, source, in_neighbors[index], true); // } // } // template <typename T> // void adList<T>::update(const EdgeList& el) // { // cout<<"begin update"<<endl; // # pragma omp parallel for // for(unsigned int i=0; i<el.size(); i++){ // // examine source vertex // processMetaData(el[i], true); // updateForVertex(el[i], true); // // examine destination vertex // processMetaData(el[i], false); // updateForVertex(el[i], false); // } // cout<<"end update"<<endl; // } // template <typename T> // int64_t adList<T>::in_degree(NodeID n) // { // NodeID id = n; // if(directed) { // bool* markerPtr; // markerPtr = &(in_markers[id]); // bool _adr = readfe_bool(markerPtr); // int64_t size = in_neighbors[id].size(); // writexf_bool(markerPtr, _adr); // return size; // } // else { // bool* markerPtr; // markerPtr = &out_markers[id]; // bool _adr = readfe_bool(markerPtr); // int64_t size = out_neighbors[id].size(); // 
writexf_bool(markerPtr, _adr); // return size; // } // } // template <typename T> // int64_t adList<T>::out_degree(NodeID n) // { // NodeID id = n; // bool* markerPtr; // markerPtr = &out_markers[id]; // bool _adr = readfe_bool(markerPtr); // int64_t size = out_neighbors[id].size(); // writexf_bool(markerPtr, _adr); // return size; // } // template <typename T> // void adList<T>::print() // { // std::cout << " numNodes: " << num_nodes << // " numEdges: " << num_edges << // " weighted: " << weighted << // " directed: " << directed << // std::endl; // /*cout << "Property: "; printVector(property); // cout << "out_neighbors: " << endl; printVecOfVecOfNodes(out_neighbors); // cout << "in_neighbors: " << endl; printVecOfVecOfNodes(in_neighbors);*/ // } // #endif // ADLIST_H_
edges.c
/* Author: Mohammed Ahmed Al Farhan Email: mohammed.farhan@kaust.edu.sa */ #include <stdio.h> #include <stdint.h> #include <omp.h> #include "inc/allocator.h" #include "inc/geometry.h" #include "inc/msh/mesh.h" #include "inc/msh/fio.h" #include "inc/msh/index.h" /* Allocate the edges */ size_t emalloc(char *restrict fbuf, struct etbl *restrict e) { const size_t sz = e->sz; // Number of edges const size_t ndsz = sz * 2; // Number of edges' endpoints uint32_t *restrict buf0; kmalloc(ndsz, sizeof(uint32_t), (void *) &buf0); size_t bytes0 = ndsz * sizeof(uint32_t); struct wtbl w0; { w0.l = fbuf; w0.h = fbuf + bytes0; w0.t = UINT; w0.sz = ndsz; } walkfbuf(&w0, buf0); const size_t nrsz = sz * 4; double *restrict buf1; kmalloc(nrsz, sizeof(double), (void *) &buf1); size_t bytes1 = nrsz * sizeof(double); struct wtbl w1; { w1.l = w0.h; w1.h = w0.h + bytes1; w1.t = DOUBLE; w1.sz = nrsz; } walkfbuf(&w1, buf1); // Find the permutation array of a sorted sequence to reorder the // edges and their normals uint32_t *restrict p; kmalloc(sz, sizeof(uint32_t), (void *) &p); imain(sz, buf0, p); // Reorder the edge endpoints and their normals uint32_t i; #pragma omp parallel for for(i = 0; i < sz; i++) { // Edge endpoints e->eptr->n0[i] = buf0[p[i]] - 1; // From Fortran to C e->eptr->n1[i] = buf0[p[i] + sz] - 1; // From Fortran to C // Unit normals of dual faces and area of the dual mesh face e->xyzn->x0[i] = buf1[p[i]]; e->xyzn->x1[i] = buf1[p[i] + sz]; e->xyzn->x2[i] = buf1[p[i] + sz + sz]; e->xyzn->x3[i] = buf1[p[i] + sz + sz + sz]; } kfree(buf0); kfree(buf1); kfree(p); return (bytes0 + bytes1); }
seeta_aip_image.h
//
// Created by kier on 2020/9/27.
//

#ifndef SEETA_AIP_SEETA_AIP_IMAGE_H
#define SEETA_AIP_SEETA_AIP_IMAGE_H

#include "seeta_aip_struct.h"

namespace seeta {
    namespace aip {
        namespace _ {
            // Element-wise cast of N values SRC -> DST; parallelized with
            // OpenMP when threads > 1 (and _OPENMP is defined).
            template<typename SRC, typename DST,
                     typename=typename std::enable_if<std::is_convertible<SRC, DST>::value>::type>
            static inline void cast(int threads, const SRC *src, DST *dst, int32_t N) {
                if (threads > 1) {
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) i = 0; i < N; ++i) {
                        dst[i] = DST(src[i]);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i) {
                        *dst++ = DST(*src++);
                    }
                }
            }

            // Same as above, but each element is multiplied by `scale`
            // (in SRC precision) before the cast.
            template<typename SRC, typename DST,
                     typename=typename std::enable_if<std::is_convertible<SRC, DST>::value>::type>
            static inline void cast(int threads, const SRC *src, DST *dst, int32_t N, SRC scale) {
                if (threads > 1) {
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) i = 0; i < N; ++i) {
                        dst[i] = DST(scale * src[i]);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i) {
                        *dst++ = DST(scale * *src++);
                    }
                }
            }

            // Dispatch on the runtime source value type and cast into DST.
            // NOTE(review): data_scale is applied only for floating-point
            // sources (FLOAT32/FLOAT64), not BYTE/INT32 — confirm intended.
            template<typename DST>
            static inline void cast_to(int threads,
                                       const void *src, SEETA_AIP_VALUE_TYPE src_type,
                                       DST *dst, uint32_t N,
                                       float data_scale = 1) {
                switch (src_type) {
                    default:
                    case SEETA_AIP_VALUE_VOID:
                        break;
                    case SEETA_AIP_VALUE_BYTE:
                        cast<uint8_t, DST>(threads, reinterpret_cast<const uint8_t *>(src), dst, int32_t(N));
                        break;
                    case SEETA_AIP_VALUE_INT32:
                        cast<int32_t, DST>(threads, reinterpret_cast<const int32_t *>(src), dst, int32_t(N));
                        break;
                    case SEETA_AIP_VALUE_FLOAT32:
                        cast<float, DST>(threads, reinterpret_cast<const float *>(src), dst, int32_t(N), data_scale);
                        break;
                    case SEETA_AIP_VALUE_FLOAT64:
                        cast<double, DST>(threads, reinterpret_cast<const double *>(src), dst, int32_t(N), data_scale);
                        break;
                }
            }

            // Byte width of one element of the given value type (0 for VOID
            // and unknown types).
            static inline size_t value_type_width(SEETA_AIP_VALUE_TYPE type) {
                switch (type) {
                    default:
                    case SEETA_AIP_VALUE_VOID:
                        return 0;
                    case SEETA_AIP_VALUE_BYTE:
                        return 1;
                    case SEETA_AIP_VALUE_FLOAT32:
                    case SEETA_AIP_VALUE_INT32:
                        return 4;
                    case SEETA_AIP_VALUE_FLOAT64:
                        return 8;
                }
            }
        }

        // Public value-type conversion: memcpy when the types already match
        // (and buffers differ), otherwise cast element-wise via _::cast_to.
        static inline void cast(int threads,
                                const void *src, SEETA_AIP_VALUE_TYPE src_type,
                                void *dst, SEETA_AIP_VALUE_TYPE dst_type,
                                uint32_t N, float data_scale = 1) {
            if (src_type == dst_type) {
                if (src != dst) {
                    std::memcpy(dst, src, _::value_type_width(src_type) * N);
                }
                return;
            }
            switch (dst_type) {
                default:
                case SEETA_AIP_VALUE_VOID:
                    break;
                case SEETA_AIP_VALUE_BYTE:
                    _::cast_to<uint8_t>(threads, src, src_type, reinterpret_cast<uint8_t *>(dst), N, data_scale);
                    break;
                case SEETA_AIP_VALUE_INT32:
                    _::cast_to<int32_t>(threads, src, src_type, reinterpret_cast<int32_t *>(dst), N, data_scale);
                    break;
                case SEETA_AIP_VALUE_FLOAT32:
                    _::cast_to<float>(threads, src, src_type, reinterpret_cast<float *>(dst), N, data_scale);
                    break;
                case SEETA_AIP_VALUE_FLOAT64:
                    _::cast_to<double>(threads, src, src_type, reinterpret_cast<double *>(dst), N, data_scale);
                    break;
            }
        }

        namespace _ {
            // Image format that results from casting to `casted_type`
            // (always one of the *RAW formats); throws for types with no
            // raw image representation.
            static inline SEETA_AIP_IMAGE_FORMAT casted_format(SEETA_AIP_IMAGE_FORMAT original_format,
                                                               SEETA_AIP_VALUE_TYPE casted_type) {
                (void) (original_format);
                switch (casted_type) {
                    default:
                        throw seeta::aip::Exception("Unknown value type.");
                    case SEETA_AIP_VALUE_VOID:
                        throw seeta::aip::Exception("There is no image format VOID");
                    case SEETA_AIP_VALUE_BYTE:
                        return SEETA_AIP_FORMAT_U8RAW;
                    case SEETA_AIP_VALUE_INT32:
                        return SEETA_AIP_FORMAT_I32RAW;
                    case SEETA_AIP_VALUE_FLOAT32:
                        return SEETA_AIP_FORMAT_F32RAW;
                    case SEETA_AIP_VALUE_FLOAT64:
                        throw seeta::aip::Exception("There is no image format FLOAT64");
                }
            }

            // Pixel-layout conversion codes; order must match the
            // converter[] table in convert_uimage below.
            enum cvt_format {
                NO_CONVERT,
                BGR2RGB,
                BGR2BGRA,
                BGR2RGBA,
                BGRA2BGR,
                BGRA2RGB,
                BGRA2RGBA,
                Y2BGR,
                Y2BGRA,
                BGR2Y,
                BGRA2Y,
                RGB2Y,
                RGBA2Y,
            };

            // Look up the conversion code for a u8 src/dst format pair.
            // Rows/cols are indexed by format - 1001 — assumes the packed
            // u8 formats are consecutive enum values starting at 1001.
            static inline cvt_format serch_u8image_cvt_format(SEETA_AIP_IMAGE_FORMAT src, SEETA_AIP_IMAGE_FORMAT dst) {
                // U8RGB, U8BGR, U8RGBA, U8BGRA, U8Y
                static cvt_format table[5][5] = {
                        {NO_CONVERT, BGR2RGB, BGR2BGRA, BGR2RGBA, RGB2Y},
                        {BGR2RGB, NO_CONVERT, BGR2RGBA, BGR2BGRA, BGR2Y},
                        {BGRA2BGR, BGRA2RGB, NO_CONVERT, BGRA2RGBA, RGBA2Y},
                        {BGRA2RGB, BGRA2BGR, BGRA2RGBA, NO_CONVERT, BGRA2Y},
                        {Y2BGR, Y2BGR, Y2BGRA, Y2BGRA, NO_CONVERT},
                };
                int i = src - 1001;
                int j = dst - 1001;
                return table[i][j];
            }

            // Per-pixel kernels below take pointers to one pixel; the
            // convert_uimage_* wrappers iterate over N pixels, with an
            // OpenMP path when threads > 1. The temporaries in the kernels
            // make src == dst (in-place) safe where channel counts match.

            // Swap B and R of one 3-channel pixel (in-place safe).
            static inline void _bgr2rgb(const uint8_t *src, uint8_t *dst) {
                auto tmp = src[0];
                dst[0] = src[2];
                dst[1] = src[1];
                dst[2] = tmp;
            }

            static inline void convert_uimage_bgr2rgb(int threads, const uint8_t *src, uint8_t *dst, int32_t N) {
                if (threads > 1) {
                    const auto N3 = N * 3;
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) n = 0; n < N3; n += 3) {
                        _bgr2rgb(src + n, dst + n);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i, src += 3, dst += 3) {
                        _bgr2rgb(src, dst);
                    }
                }
            }

            // Append an alpha channel of 0 (alpha is always written as 0
            // by every *2bgra/*2rgba kernel in this file).
            static inline void _bgr2bgra(const uint8_t *src, uint8_t *dst) {
                dst[0] = src[0];
                dst[1] = src[1];
                dst[2] = src[2];
                dst[3] = 0;
            }

            static inline void convert_uimage_bgr2bgra(int threads, const uint8_t *src, uint8_t *dst, int32_t N) {
                if (threads > 1) {
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) i = 0; i < N; ++i) {
                        _bgr2bgra(src + i * 3, dst + i * 4);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i, src += 3, dst += 4) {
                        _bgr2bgra(src, dst);
                    }
                }
            }

            // Swap B/R and append alpha = 0.
            static inline void _bgr2rgba(const uint8_t *src, uint8_t *dst) {
                dst[0] = src[2];
                dst[1] = src[1];
                dst[2] = src[0];
                dst[3] = 0;
            }

            static inline void convert_uimage_bgr2rgba(int threads, const uint8_t *src, uint8_t *dst, int32_t N) {
                if (threads > 1) {
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) i = 0; i < N; ++i) {
                        _bgr2rgba(src + i * 3, dst + i * 4);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i, src += 3, dst += 4) {
                        _bgr2rgba(src, dst);
                    }
                }
            }

            // Drop the alpha channel.
            static inline void _bgra2bgr(const uint8_t *src, uint8_t *dst) {
                dst[0] = src[0];
                dst[1] = src[1];
                dst[2] = src[2];
            }

            static inline void convert_uimage_bgra2bgr(int threads, const uint8_t *src, uint8_t *dst, int32_t N) {
                if (threads > 1) {
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) i = 0; i < N; ++i) {
                        _bgra2bgr(src + i * 4, dst + i * 3);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i, src += 4, dst += 3) {
                        _bgra2bgr(src, dst);
                    }
                }
            }

            // Drop alpha and swap B/R.
            static inline void _bgra2rgb(const uint8_t *src, uint8_t *dst) {
                dst[0] = src[2];
                dst[1] = src[1];
                dst[2] = src[0];
            }

            static inline void convert_uimage_bgra2rgb(int threads, const uint8_t *src, uint8_t *dst, int32_t N) {
                if (threads > 1) {
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) i = 0; i < N; ++i) {
                        _bgra2rgb(src + i * 4, dst + i * 3);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i, src += 4, dst += 3) {
                        _bgra2rgb(src, dst);
                    }
                }
            }

            // Swap B/R keeping alpha (in-place safe).
            static inline void _bgra2rgba(const uint8_t *src, uint8_t *dst) {
                auto tmp = src[0];
                dst[0] = src[2];
                dst[1] = src[1];
                dst[2] = tmp;
                dst[3] = src[3];
            }

            static inline void convert_uimage_bgra2rgba(int threads, const uint8_t *src, uint8_t *dst, int32_t N) {
                if (threads > 1) {
                    const auto N4 = N * 4;
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) n = 0; n < N4; n += 4) {
                        _bgra2rgba(src + n, dst + n);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i, src += 4, dst += 4) {
                        _bgra2rgba(src, dst);
                    }
                }
            }

            // Replicate gray into the three color channels.
            static inline void _y2bgr(const uint8_t *src, uint8_t *dst) {
                dst[0] = src[0];
                dst[1] = src[0];
                dst[2] = src[0];
            }

            static inline void convert_uimage_y2bgr(int threads, const uint8_t *src, uint8_t *dst, int32_t N) {
                if (threads > 1) {
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) i = 0; i < N; ++i) {
                        _y2bgr(src + i, dst + i * 3);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i, src += 1, dst += 3) {
                        _y2bgr(src, dst);
                    }
                }
            }

            // Replicate gray and append alpha = 0.
            static inline void _y2bgra(const uint8_t *src, uint8_t *dst) {
                dst[0] = src[0];
                dst[1] = src[0];
                dst[2] = src[0];
                dst[3] = 0;
            }

            static inline void convert_uimage_y2bgra(int threads, const uint8_t *src, uint8_t *dst, int32_t N) {
                if (threads > 1) {
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) i = 0; i < N; ++i) {
                        _y2bgra(src + i, dst + i * 4);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i, src += 1, dst += 4) {
                        _y2bgra(src, dst);
                    }
                }
            }

            // BGR -> gray using fixed-point BT.601 luma weights
            // (19595/38469/7472 are 0.299/0.587/0.114 scaled by 2^16).
            static inline void _bgr2y(const uint8_t *src, uint8_t *dst) {
                auto B = src[0];
                auto G = src[1];
                auto R = src[2];
                auto Y = (R * uint32_t(19595) + G * uint32_t(38469) + B * uint32_t(7472)) >> 16;
                dst[0] = Y > 255 ? 255 : Y;
            }

            static inline void convert_uimage_bgr2y(int threads, const uint8_t *src, uint8_t *dst, int32_t N) {
                if (threads > 1) {
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) i = 0; i < N; ++i) {
                        _bgr2y(src + i * 3, dst + i);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i, src += 3, dst += 1) {
                        _bgr2y(src, dst);
                    }
                }
            }

            // BGRA -> gray: reuses _bgr2y (first three channels are BGR),
            // stepping 4 bytes per source pixel to skip alpha.
            static inline void convert_uimage_bgra2y(int threads, const uint8_t *src, uint8_t *dst, int32_t N) {
                if (threads > 1) {
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) i = 0; i < N; ++i) {
                        _bgr2y(src + i * 4, dst + i);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i, src += 4, dst += 1) {
                        _bgr2y(src, dst);
                    }
                }
            }

            // RGB -> gray, same weights with R/B positions swapped.
            static inline void _rgb2y(const uint8_t *src, uint8_t *dst) {
                auto B = src[2];
                auto G = src[1];
                auto R = src[0];
                auto Y = (R * uint32_t(19595) + G * uint32_t(38469) + B * uint32_t(7472)) >> 16;
                dst[0] = Y > 255 ? 255 : Y;
            }

            static inline void convert_uimage_rgb2y(int threads, const uint8_t *src, uint8_t *dst, int32_t N) {
                if (threads > 1) {
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) i = 0; i < N; ++i) {
                        _rgb2y(src + i * 3, dst + i);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i, src += 3, dst += 1) {
                        _rgb2y(src, dst);
                    }
                }
            }

            // RGBA -> gray: reuses _rgb2y, stepping 4 bytes to skip alpha.
            static inline void convert_uimage_rgba2y(int threads, const uint8_t *src, uint8_t *dst, int32_t N) {
                if (threads > 1) {
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) i = 0; i < N; ++i) {
                        _rgb2y(src + i * 4, dst + i);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i, src += 4, dst += 1) {
                        _rgb2y(src, dst);
                    }
                }
            }

            // Dispatch table indexed by cvt_format (entry 0 = NO_CONVERT
            // = nullptr, which is a no-op).
            static inline void convert_uimage(int threads, cvt_format cvt_code,
                                              const void *src, void *dst,
                                              uint32_t pixel_number) {
                static decltype(convert_uimage_bgr2rgb) *converter[] = {
                        nullptr,
                        convert_uimage_bgr2rgb,
                        convert_uimage_bgr2bgra,
                        convert_uimage_bgr2rgba,
                        convert_uimage_bgra2bgr,
                        convert_uimage_bgra2rgb,
                        convert_uimage_bgra2rgba,
                        convert_uimage_y2bgr,
                        convert_uimage_y2bgra,
                        convert_uimage_bgr2y,
                        convert_uimage_bgra2y,
                        convert_uimage_rgb2y,
                        convert_uimage_rgba2y,
                };
                auto func = converter[cvt_code];
                if (!func) return;
                func(threads, reinterpret_cast<const uint8_t *>(src), reinterpret_cast<uint8_t *>(dst),
                     int32_t(pixel_number));
            }

            // Convert between u8 image formats. Formats < 1000 are treated
            // as raw (no channel semantics): only a same-channel copy is
            // allowed for them.
            static inline void convert_u8image(int threads, const void *src_data, void *dst_data,
                                               uint32_t src_channels, uint32_t dst_channels,
                                               SEETA_AIP_IMAGE_FORMAT src_format, SEETA_AIP_IMAGE_FORMAT dst_format,
                                               uint32_t pixel_number) {
                if ((src_format < 1000 || dst_format < 1000) && src_channels != dst_channels) {
                    throw seeta::aip::Exception("Can not convert mismatch channel images with format U8Raw");
                }
                if (src_format < 1000 || dst_format < 1000) {
                    // raw no need to channels swap
                    if (src_data != dst_data) {
                        std::memcpy(dst_data, src_data, pixel_number * src_channels);
                    }
                    return;
                }
                auto cvt_code = _::serch_u8image_cvt_format(src_format, dst_format);
                if (cvt_code != 0) {
                    _::convert_uimage(threads, cvt_code, src_data, dst_data, pixel_number);
                } else {
                    // no swap, so copy input to output
                    if (src_data != dst_data) {
                        std::memcpy(dst_data, src_data, pixel_number * src_channels);
                    }
                }
            }

            // Float images: only F32RAW -> F32RAW same-channel copies are
            // supported (4 bytes per element).
            static inline void convert_f32image(int threads, const void *src_data, void *dst_data,
                                                uint32_t src_channels, uint32_t dst_channels,
                                                SEETA_AIP_IMAGE_FORMAT src_format, SEETA_AIP_IMAGE_FORMAT dst_format,
                                                uint32_t pixel_number) {
                if (src_format != SEETA_AIP_FORMAT_F32RAW || dst_format != SEETA_AIP_FORMAT_F32RAW) {
                    throw seeta::aip::Exception("Float image format only support F32Raw for now.");
                }
                if (src_channels != dst_channels) {
                    throw seeta::aip::Exception("Can not convert mismatch channel images with format F32Raw");
                }
                if (src_data != dst_data) {
                    std::memcpy(dst_data, src_data, pixel_number * src_channels * 4);
                }
            }

            // Integer images: only I32RAW -> I32RAW same-channel copies.
            static inline void convert_i32image(int threads, const void *src_data, void *dst_data,
                                                uint32_t src_channels, uint32_t dst_channels,
                                                SEETA_AIP_IMAGE_FORMAT src_format, SEETA_AIP_IMAGE_FORMAT dst_format,
                                                uint32_t pixel_number) {
                if (src_format != SEETA_AIP_FORMAT_I32RAW || dst_format != SEETA_AIP_FORMAT_I32RAW) {
                    throw seeta::aip::Exception("Integer image format only support I32Raw for now.");
                }
                if (src_channels != dst_channels) {
                    throw seeta::aip::Exception("Can not convert mismatch channel images with format I32Raw");
                }
                if (src_data != dst_data) {
                    std::memcpy(dst_data, src_data, pixel_number * src_channels * 4);
                }
            }

            // Row-major linear offset of (dim1, dim2, dim3, dim4) in a
            // 4-D tensor with the given shape.
            static inline uint32_t offset4d(const uint32_t *shape,
                                            uint32_t dim1, uint32_t dim2, uint32_t dim3, uint32_t dim4) {
                return ((dim1 * shape[1] + dim2) * shape[2] + dim3) * shape[3] + dim4;
            }

            // Permute a 4-D tensor: dst axis order is (dim1,dim2,dim3,dim4)
            // of the source axes; redim is the inverse permutation used to
            // map dst loop indices back to src coordinates.
            template <typename T>
            static inline void permute4d(
                    T *dst, const T *src, const uint32_t *shape,
                    uint32_t dim1, uint32_t dim2, uint32_t dim3, uint32_t dim4) {
                std::vector<uint32_t> dim(4), redim(4), idx(4);
                dim[0] = dim1; redim[dim[0]] = 0;
                dim[1] = dim2; redim[dim[1]] = 1;
                dim[2] = dim3; redim[dim[2]] = 2;
                dim[3] = dim4; redim[dim[3]] = 3;
                std::vector<int> new_dims(4);
                for (int i = 0; i < 4; ++i) new_dims[i] = shape[dim[i]];
                int cnt = 0;
                for (idx[0] = 0; idx[0] < shape[dim[0]]; ++idx[0]) {
                    for (idx[1] = 0; idx[1] < shape[dim[1]]; ++idx[1]) {
                        for (idx[2] = 0; idx[2] < shape[dim[2]]; ++idx[2]) {
                            for (idx[3] = 0; idx[3] < shape[dim[3]]; ++idx[3]) {
                                dst[cnt] = src[offset4d(
                                        shape,
                                        idx[redim[0]], idx[redim[1]], idx[redim[2]], idx[redim[3]])];
                                cnt++;
                            }
                        }
                    }
                }
            }

            // Runtime value-type dispatch onto the typed permute4d above.
            static inline void permute4d(
                    SEETA_AIP_VALUE_TYPE type,
                    void *dst, const void *src, const uint32_t *shape,
                    uint32_t dim1, uint32_t dim2, uint32_t dim3, uint32_t dim4) {
                switch (type) {
                    default:
                        throw seeta::aip::Exception("Got unknown value type.");
                    case SEETA_AIP_VALUE_VOID:
                        break;
                    case SEETA_AIP_VALUE_BYTE: {
                        using T = uint8_t;
                        permute4d(reinterpret_cast<T*>(dst), reinterpret_cast<const T*>(src), shape, dim1, dim2, dim3, dim4);
                        break;
                    }
                    case SEETA_AIP_VALUE_INT32: {
                        using T = int32_t ;
                        permute4d(reinterpret_cast<T*>(dst), reinterpret_cast<const T*>(src), shape, dim1, dim2, dim3, dim4);
                        break;
                    }
                    case SEETA_AIP_VALUE_FLOAT32: {
                        using T = float;
                        permute4d(reinterpret_cast<T*>(dst), reinterpret_cast<const T*>(src), shape, dim1, dim2, dim3, dim4);
                        break;
                    }
                    case SEETA_AIP_VALUE_FLOAT64: {
                        using T = double;
                        permute4d(reinterpret_cast<T*>(dst), reinterpret_cast<const T*>(src), shape, dim1, dim2, dim3, dim4);
                        break;
                    }
                }
            }
        }

        // Byte width of one element of the given image format (0 when the
        // underlying value type is VOID/unknown).
        inline uint32_t image_format_element_width(SEETA_AIP_IMAGE_FORMAT format) {
            auto type = seeta::aip::ImageData::GetType(format);
            switch (type) {
                default:
                    return 0;
                case SEETA_AIP_VALUE_VOID:
                    return 0;
                case SEETA_AIP_VALUE_BYTE:
                    return 1;
                case SEETA_AIP_VALUE_FLOAT32:
                    return 4;
                case SEETA_AIP_VALUE_INT32:
                    return 4;
                case SEETA_AIP_VALUE_FLOAT64:
                    return 8;
            }
        }

        // Top-level image conversion entry point (continues past this
        // chunk; the body below is intentionally left as found).
        static inline void convert(int threads,
                                   SeetaAIPImageData src,
                                   SeetaAIPImageData dst,
                                   float data_scale = 255.0) {
            if (src.format == dst.format) {
                auto SRC_N = src.number * src.height * src.width;
                auto DST_N = dst.number * dst.height * dst.width;
                if (SRC_N != DST_N)
{ throw seeta::aip::Exception("Convert image pixels' number must be equal."); } auto src_format = SEETA_AIP_IMAGE_FORMAT(src.format); auto type_width = image_format_element_width(src_format); auto src_data = src.data; auto dst_data = dst.data; auto src_channels = seeta::aip::ImageData::GetChannels(src_format, src.channels); auto dst_channels = seeta::aip::ImageData::GetChannels(src_format, dst.channels); if (src_channels != dst_channels) { throw seeta::aip::Exception("Convert images' channels must be equal with format are same."); } std::memcpy(dst_data, src_data, SRC_N * src_channels * type_width); return; } if ((src.format & 0xffff0000) == 0x80000) { // CHW format seeta::aip::ImageData tmp( SEETA_AIP_IMAGE_FORMAT(src.format & 0x0000ffff), src.number, src.width, src.height, src.channels); uint32_t chw_shape[] = {src.number, src.channels, src.height, src.width }; _::permute4d(tmp.type(), tmp.data(), src.data, chw_shape, 0, 2, 3, 1); convert(threads, tmp, dst); return; } if ((dst.format & 0xffff0000) == 0x80000) { // CHW format seeta::aip::ImageData tmp( SEETA_AIP_IMAGE_FORMAT(dst.format & 0x0000ffff), dst.number, dst.width, dst.height, dst.channels); convert(threads, src, tmp); uint32_t hwc_shape[] = {dst.number, dst.height, dst.width, dst.channels }; _::permute4d(tmp.type(), dst.data, tmp.data(), hwc_shape, 0, 3, 1, 2); return; } auto SRC_N = src.number * src.height * src.width; auto DST_N = dst.number * dst.height * dst.width; if (SRC_N != DST_N) { throw seeta::aip::Exception("Convert image pixels' number must be equal."); } auto src_format = SEETA_AIP_IMAGE_FORMAT(src.format); auto dst_format = SEETA_AIP_IMAGE_FORMAT(dst.format); auto src_type = seeta::aip::ImageData::GetType(src_format); auto dst_type = seeta::aip::ImageData::GetType(dst_format); auto src_data = src.data; auto dst_data = dst.data; auto src_channels = seeta::aip::ImageData::GetChannels(src_format, src.channels); auto dst_channels = seeta::aip::ImageData::GetChannels(dst_format, dst.channels); // 
step1. convert dtype std::shared_ptr<char> buffer; if (src_type != dst_type) { if (src_channels == dst_channels) { // for output channels same with input channels cast(threads, src.data, src_type, dst.data, dst_type, SRC_N * src_channels, data_scale); src_type = dst_type; src_data = dst.data; src_format = _::casted_format(src_format, dst_type); } else { // use buffer to convert data buffer.reset(new char[SRC_N * src_channels * _::value_type_width(dst_type)], std::default_delete<char[]>()); cast(threads, src.data, src_type, buffer.get(), dst_type, SRC_N * src_channels, data_scale); src_type = dst_type; src_data = buffer.get(); src_format = _::casted_format(src_format, dst_type); } } // step2. swap channels switch (dst_type) { default: break; case SEETA_AIP_VALUE_BYTE: _::convert_u8image(threads, src_data, dst_data, src_channels, dst_channels, src_format, dst_format, SRC_N); break; case SEETA_AIP_VALUE_INT32: _::convert_i32image(threads, src_data, dst_data, src_channels, dst_channels, src_format, dst_format, SRC_N); break; case SEETA_AIP_VALUE_FLOAT32: _::convert_f32image(threads, src_data, dst_data, src_channels, dst_channels, src_format, dst_format, SRC_N); break; } } /** * Use `threads` to convert image to `format`. * @param threads using threads(OpenMP) * @param format wanted format * @param image original image * @param data_scale used when convert float value to wanted format. the image value while multiply `data_scale`. * @return */ static seeta::aip::ImageData convert(int threads, SEETA_AIP_IMAGE_FORMAT format, SeetaAIPImageData image, float data_scale = 255.0) { seeta::aip::ImageData converted(format, image.number, image.width, image.height, seeta::aip::ImageData::GetChannels(format, image.channels)); convert(threads, image, SeetaAIPImageData(converted), data_scale); return converted; } /** * Use `threads` to convert image to `format`. 
* @param threads using threads(OpenMP) * @param format wanted format * @param image original image * @param data_scale used when convert float value to wanted format. the image value while multiply `data_scale`. * @return */ static seeta::aip::ImageData convert(int threads, SEETA_AIP_IMAGE_FORMAT format, const seeta::aip::ImageData &image, float data_scale = 255.0) { return convert(threads, format, SeetaAIPImageData(image), data_scale); } /** * Use `threads` to convert image to `format`.:q:q * * @param threads using threads(OpenMP) * @param format wanted format * @param align memory align * @param image original image * @param data_scale used when convert float value to wanted format. the image value while multiply `data_scale`. * @return */ static seeta::aip::ImageData convert(int threads, SEETA_AIP_IMAGE_FORMAT format, const ImageAlign &align, SeetaAIPImageData image, float data_scale = 255.0) { seeta::aip::ImageData converted(format, align, image.number, image.width, image.height, seeta::aip::ImageData::GetChannels(format, image.channels)); convert(threads, image, SeetaAIPImageData(converted), data_scale); return converted; } /** * Use `threads` to convert image to `format`. * @param threads using threads(OpenMP) * @param format wanted format * @param align memory align * @param image original image * @param data_scale used when convert float value to wanted format. the image value while multiply `data_scale`. * @return */ static seeta::aip::ImageData convert(int threads, SEETA_AIP_IMAGE_FORMAT format, const ImageAlign &align, const seeta::aip::ImageData &image, float data_scale = 255.0) { return convert(threads, format, align, SeetaAIPImageData(image), data_scale); } } } #endif //SEETA_AIP_SEETA_AIP_IMAGE_H
GB_binop__first_int8.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd): GB_AaddB__first_int8
// A.*B function (eWiseMult): GB_AemultB__first_int8
// A*D function (colscale): GB_AxD__first_int8
// D*A function (rowscale): GB_DxB__first_int8
// C+=B function (dense accum): GB_Cdense_accumB__first_int8
// C+=b function (dense accum): GB_Cdense_accumb__first_int8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__first_int8
// C=scalar+B GB_bind1st__first_int8
// C=scalar+B' GB_bind1st_tran__first_int8
// C=A+scalar (none)
// C=A'+scalar (none)

// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = aij

// NOTE: FIRST(x,y) == x, so the second operand is never read.  This is why
// GB_GETB expands to nothing, GB_BINOP ignores its y argument, and the
// bind2nd variants below are disabled with #if 0 (C=A+scalar with FIRST is
// just a copy of A, handled elsewhere).

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

// bij = Bx [pB]
// empty: FIRST never reads its second operand
#define GB_GETB(bij,Bx,pB) \
    ;

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: z = FIRST (x, y) = x
#define GB_BINOP(z, x, y, i, j) \
    z = x ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FIRST || GxB_NO_INT8 || GxB_NO_FIRST_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// FIRST is none of these, so this kernel is not generated.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__first_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// With accum = FIRST, cij = FIRST (cij, bij) = cij, so C += B leaves C
// unchanged; the template is disabled and the function is a no-op.

GrB_Info GB_Cdense_accumB__first_int8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// Same as above: FIRST as accum makes C += b a no-op.

GrB_Info GB_Cdense_accumb__first_int8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__first_int8
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__first_int8
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// GB_FREE_ALL releases the three ek_slice workspaces allocated inside the
// add/emult templates; it must match the NULL-initialized pointers below.

#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                 \
{                                                                   \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;  \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;  \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;  \
}

GrB_Info GB_AaddB__first_int8
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__first_int8
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// With FIRST, Cx [p] = x for every entry present in B (scalar expansion);
// the values of Bx are never read.

GrB_Info GB_bind1st__first_int8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        // the two stray semicolons are the empty GB_GETA/GB_GETB expansions
        ;
        ;
        Cx [p] = x ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0

// disabled: FIRST (aij, y) = aij, a plain copy of A

GrB_Info (none)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int8_t aij = Ax [p] ;
        Cx [p] = aij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)   \
{                           \
    ; ;                     \
    Cx [pC] = x ;           \
}

GrB_Info GB_bind1st_tran__first_int8
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// disabled: FIRST (aij, y) = aij, handled by the plain transpose

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int8_t aij = Ax [pA] ;          \
    Cx [pC] = aij ;                 \
}

GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
covariance.c
/** * covariance.c: This file was adapted from PolyBench/GPU 1.0 test * suite to run on GPU with OpenMP 4.0 pragmas and OpenCL driver. * * http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br> * Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br> * Luís Felipe Mattos <ra107822@students.ic.unicamp.br> */ #include <assert.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <unistd.h> #ifdef _OPENMP #include <omp.h> #endif #include "BenchmarksUtil.h" // define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 1.05 #ifdef RUN_TEST #define SIZE 1100 #elif RUN_BENCHMARK #define SIZE 9600 #else #define SIZE 1000 #endif /* Problem size */ #define M SIZE #define N SIZE #define sqrt_of_array_cell(x, j) sqrt(x[j]) #define FLOAT_N 3214212.01 #define EPS 0.005 /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_arrays(DATA_TYPE *data) { int i, j; for (i = 1; i < (M + 1); i++) { for (j = 1; j < (N + 1); j++) { data[i * (N + 1) + j] = ((DATA_TYPE)i * j) / M; } } } int compareResults(DATA_TYPE *symmat, DATA_TYPE *symmat_outputFromGpu) { int i, j, fail; fail = 0; for (i = 1; i < (M + 1); i++) { for (j = 1; j < (N + 1); j++) { if (percentDiff(symmat[i * (N + 1) + j], symmat_outputFromGpu[i * (N + 1) + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f " "Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); return fail; } void covariance(DATA_TYPE *data, DATA_TYPE *symmat, DATA_TYPE *mean) { int i, j, j1, j2; /* Determine mean of column vectors of input data matrix */ for (j = 1; j < (M + 1); j++) { mean[j] = 0.0; for (i = 1; i < (N + 1); i++) { mean[j] += data[i * (M + 1) + j]; } mean[j] /= FLOAT_N; } /* Center the column vectors. 
*/ for (i = 1; i < (N + 1); i++) { for (j = 1; j < (M + 1); j++) { data[i * (M + 1) + j] -= mean[j]; } } /* Calculate the m * m covariance matrix. */ for (j1 = 1; j1 < (M + 1); j1++) { for (j2 = j1; j2 < (M + 1); j2++) { symmat[j1 * (M + 1) + j2] = 0.0; for (i = 1; i < N + 1; i++) { symmat[j1 * (M + 1) + j2] += data[i * (M + 1) + j1] * data[i * (M + 1) + j2]; } symmat[j2 * (M + 1) + j1] = symmat[j1 * (M + 1) + j2]; } } } void covariance_OMP(DATA_TYPE *data, DATA_TYPE *data2, DATA_TYPE *symmat, DATA_TYPE *mean) { /* Determine mean of column vectors of input data matrix */ #pragma omp target data map(to: data[:(M+1)*(N+1)]) map(alloc: mean[:(M+1)]) map(tofrom: symmat[:(M+1)*(N+1)]) device(DEVICE_ID) { #pragma omp target teams distribute parallel for device(DEVICE_ID) for (int j = 1; j < (M + 1); j++) { mean[j] = 0.0; for (int i = 1; i < (N + 1); i++) { mean[j] += data[i * (M + 1) + j]; } mean[j] /= FLOAT_N; } /* Center the column vectors. */ #pragma omp target teams distribute parallel for collapse(2) device(DEVICE_ID) for (int i = 1; i < (N + 1); i++) { for (int j = 1; j < (M + 1); j++) { data2[i * (M + 1) + j] = data[i * (M + 1) + j] - mean[j]; } } /* Calculate the m * m covariance matrix. 
*/ #pragma omp target teams distribute parallel for device(DEVICE_ID) for (int j1 = 1; j1 < (M + 1); j1++) { for (int j2 = j1; j2 < (M + 1); j2++) { symmat[j1 * (M + 1) + j2] = 0.0; for (int i = 1; i < N + 1; i++) { symmat[j1 * (M + 1) + j2] += data2[i * (M + 1) + j1] * data2[i * (M + 1) + j2]; } symmat[j2 * (M + 1) + j1] = symmat[j1 * (M + 1) + j2]; } } } } int main() { double t_start, t_end; int fail = 0; DATA_TYPE *data; DATA_TYPE *data_GPU; DATA_TYPE *data2_GPU; DATA_TYPE *symmat; DATA_TYPE *mean; DATA_TYPE *mean_GPU; DATA_TYPE *symmat_outputFromGpu; data = (DATA_TYPE *)calloc((M + 1) * (N + 1), sizeof(DATA_TYPE)); data2_GPU = (DATA_TYPE *)calloc((M + 1) * (N + 1), sizeof(DATA_TYPE)); symmat = (DATA_TYPE *)calloc((M + 1) * (M + 1), sizeof(DATA_TYPE)); mean = (DATA_TYPE *)calloc((M + 1), sizeof(DATA_TYPE)); symmat_outputFromGpu = (DATA_TYPE *)calloc((M + 1) * (M + 1), sizeof(DATA_TYPE)); mean_GPU = (DATA_TYPE *)calloc((M + 1), sizeof(DATA_TYPE)); fprintf(stdout, "<< Covariance Computation >>\n"); init_arrays(data); t_start = rtclock(); covariance_OMP(data, data2_GPU, symmat_outputFromGpu, mean_GPU); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); #ifdef RUN_TEST t_start = rtclock(); covariance(data, symmat, mean); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); fail = compareResults(symmat, symmat_outputFromGpu); #endif free(data); free(symmat); free(mean); free(symmat_outputFromGpu); return fail; }
GB_binop__isne_fc64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd): GB (_AaddB__isne_fc64)
// A.*B function (eWiseMult): GB (_AemultB_01__isne_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__isne_fc64)
// A.*B function (eWiseMult): GB (_AemultB_03__isne_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_fc64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__isne_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__isne_fc64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_fc64)
// C=scalar+B GB (_bind1st__isne_fc64)
// C=scalar+B' GB (_bind1st_tran__isne_fc64)
// C=A+scalar GB (_bind2nd__isne_fc64)
// C=A'+scalar GB (_bind2nd_tran__isne_fc64)

// C type: GxB_FC64_t
// A type: GxB_FC64_t
// B,b type: GxB_FC64_t
// BinaryOp: cij = GB_FC64_isne (aij, bij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_BTYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = 
Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ GxB_FC64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_FC64_isne (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISNE || GxB_NO_FC64 || GxB_NO_ISNE_FC64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isne_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isne_fc64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isne_fc64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC64_t GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((node)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isne_fc64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__isne_fc64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t 
*restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isne_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__isne_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isne_fc64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isne_fc64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ; GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC64_t bij = Bx [p] ; Cx [p] = GB_FC64_isne (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isne_fc64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ; GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC64_t aij = Ax [p] ; Cx [p] = GB_FC64_isne (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = Ax [pA] ; \ Cx [pC] = GB_FC64_isne (x, aij) ; \ } GrB_Info GB (_bind1st_tran__isne_fc64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = Ax [pA] ; \ Cx [pC] = GB_FC64_isne (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__isne_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
MaxPooling.h
// -------------------------------------------------------------------------- // Binary Brain -- binary neural net framework // // Copyright (C) 2018 by Ryuji Fuchikami // https://github.com/ryuz // ryuji.fuchikami@nifty.com // -------------------------------------------------------------------------- #pragma once #include <vector> #include <random> #include "bb/Filter2d.h" namespace bb { // MaxPoolingクラス template <typename FT = float, typename BT = float> class MaxPooling : public Filter2d { using _super = Filter2d; using argmax_t = std::int32_t; public: static inline std::string ModelName(void) { return "MaxPooling"; } static inline std::string ObjectName(void){ return ModelName() + "_" + DataType<FT>::Name() + "_" + DataType<BT>::Name(); } std::string GetModelName(void) const override { return ModelName(); } std::string GetObjectName(void) const override { return ObjectName(); } protected: bool m_host_only = false; index_t m_filter_h_size; index_t m_filter_w_size; index_t m_input_c_size = 0; index_t m_input_h_size = 0; index_t m_input_w_size = 0; index_t m_output_c_size = 0; index_t m_output_h_size = 0; index_t m_output_w_size = 0; indices_t m_input_shape; indices_t m_output_shape; std::stack<indices_t> m_stack_shape; public: struct create_t { index_t filter_h_size = 1; index_t filter_w_size = 1; }; protected: // コンストラクタ MaxPooling() {} MaxPooling(create_t const &create) { m_filter_h_size = create.filter_h_size; m_filter_w_size = create.filter_w_size; } /** * @brief コマンド処理 * @detail コマンド処理 * @param args コマンド */ void CommandProc(std::vector<std::string> args) { // HostOnlyモード設定 if (args.size() == 2 && args[0] == "host_only") { m_host_only = EvalBool(args[1]); } } public: ~MaxPooling() {} static std::shared_ptr<MaxPooling> Create(create_t const &create) { return std::shared_ptr<MaxPooling>(new MaxPooling(create)); } static std::shared_ptr<MaxPooling> Create(index_t filter_h_size=1, index_t filter_w_size=1) { create_t create; create.filter_h_size = filter_h_size; 
create.filter_w_size = filter_w_size; return Create(create); } #ifdef BB_PYBIND11 // 全パラメータを引数としたオーバーロード無しの生成関数(主にpython用) static std::shared_ptr<MaxPooling> CreatePy(index_t filter_h_size, index_t filter_w_size) { create_t create; create.filter_h_size = filter_h_size; create.filter_w_size = filter_w_size; return Create(create); } #endif index_t GetFilterHeight(void) const override { return m_filter_h_size; } index_t GetFilterWidth(void) const override { return m_filter_w_size; } /** * @brief 入力形状設定 * @detail 入力形状を設定する * 内部変数を初期化し、以降、GetOutputShape()で値取得可能となることとする * 同一形状を指定しても内部変数は初期化されるものとする * @param shape 1フレームのノードを構成するshape * @return 出力形状を返す */ indices_t SetInputShape(indices_t shape) override { // 設定済みなら何もしない if ( shape == this->GetInputShape() ) { return this->GetOutputShape(); } BB_ASSERT(shape.size() == 3); m_input_c_size = shape[0]; m_input_h_size = shape[1]; m_input_w_size = shape[2]; m_output_w_size = (m_input_w_size + m_filter_w_size - 1) / m_filter_w_size; m_output_h_size = (m_input_h_size + m_filter_h_size - 1) / m_filter_h_size; m_output_c_size = m_input_c_size; m_input_shape = shape; m_output_shape = indices_t({m_output_c_size, m_output_h_size, m_output_w_size}); return m_output_shape; } /** * @brief 入力形状取得 * @detail 入力形状を取得する * @return 入力形状を返す */ indices_t GetInputShape(void) const { return m_input_shape; } /** * @brief 出力形状取得 * @detail 出力形状を取得する * @return 出力形状を返す */ indices_t GetOutputShape(void) const { return m_output_shape; } protected: inline index_t GetInputNode(index_t c, index_t y, index_t x) { return (c * m_input_h_size + y) * m_input_w_size + x; } inline index_t GetOutputNode(index_t c, index_t y, index_t x) { return (c * m_output_h_size + y) * m_output_w_size + x; } public: void Clear(void) override { _super::Clear(); while (!m_stack_shape.empty()) { m_stack_shape.pop(); } _super::Clear(); } FrameBuffer Forward(FrameBuffer x_buf, bool train = true) override { BB_ASSERT(x_buf.GetType() == DataType<FT>::type); // SetInputShpaeされていなければ初回に設定 
if (x_buf.GetShape() != m_input_shape) { SetInputShape(x_buf.GetShape()); } // bit版(maxargを残すよりx_bufを残す方が小さい) if (DataType<FT>::type == BB_TYPE_BIT && m_filter_h_size * m_filter_w_size < 32) { return Forward_bit(x_buf, train); } // 出力を設定 FrameBuffer y_buf(x_buf.GetFrameSize(), m_output_shape, DataType<FT>::type); FrameBuffer argmax_buf(x_buf.GetFrameSize(), m_output_shape, DataType<argmax_t>::type); // backwardの為に保存 if ( train ) { this->m_stack_shape.push(x_buf.GetShape()); this->PushFrameBuffer(argmax_buf); } #ifdef BB_WITH_CUDA // FP32 CUDA版 if ( DataType<FT>::type == BB_TYPE_FP32 && !m_host_only && x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && argmax_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) { auto ptr_x = x_buf.LockDeviceMemoryConst(); auto ptr_y = y_buf.LockDeviceMemory(true); auto ptr_argmax = argmax_buf.LockDeviceMemory(true); bbcu_fp32_MaxPooling_Forward ( (float const *)ptr_x.GetAddr(), (float *)ptr_y.GetAddr(), (int *)ptr_argmax.GetAddr(), (int )m_filter_h_size, (int )m_filter_w_size, (int )m_input_w_size, (int )m_input_h_size, (int )m_output_w_size, (int )m_output_h_size, (int )m_output_c_size, (int )y_buf.GetFrameSize(), (int )(y_buf.GetFrameStride() / sizeof(float)) ); return y_buf; } #endif /* // float用実装 if ( DataType<FT>::type == BB_TYPE_FP32 ) { auto x_ptr = x_buf.LockConst<FT>(); auto y_ptr = y_buf.Lock<FT>(true); index_t m256_frame_size = (int)y_buf.GetFrameStride() / sizeof(float); #pragma omp parallel for for (index_t c = 0; c < m_input_c_size; ++c) { for (index_t y = 0; y < m_output_h_size; ++y) { for (index_t x = 0; x < m_output_w_size; ++x) { float *y_addr = (float *)y_ptr.GetAddr(GetOutputNode(c, y, x)); for (index_t frame = 0; frame < m256_frame_size; frame += 8) { __m256 max_val = _mm256_set1_ps(-1.0e7f); // 前段に活性化入れるから0がminだよね? 
for (index_t fy = 0; fy < m_filter_h_size; ++fy) { index_t iy = y*m_filter_h_size + fy; if ( iy < m_input_h_size ) { for (index_t fx = 0; fx < m_filter_w_size; ++fx) { index_t ix = x*m_filter_w_size + fx; if ( ix < m_input_w_size ) { float const *x_addr = (float const *)x_ptr.GetAddr(GetInputNode(c, iy, ix)); __m256 in_sig = _mm256_load_ps(&x_addr[frame]); max_val = _mm256_max_ps(max_val, in_sig); } } } } _mm256_store_ps(&y_addr[frame], max_val); } } } } return y_buf; } */ // 汎用版実装 { auto x_ptr = x_buf.LockConst<FT>(); auto y_ptr = y_buf.Lock<FT>(true); auto argmax_ptr = argmax_buf.Lock<argmax_t>(true); auto frame_size = x_buf.GetFrameSize(); #pragma omp parallel for for (index_t c = 0; c < m_input_c_size; ++c) { for (index_t y = 0; y < m_output_h_size; ++y) { for (index_t x = 0; x < m_output_w_size; ++x) { for (index_t frame = 0; frame < frame_size; ++frame) { FT max_val = std::numeric_limits<FT>::lowest(); argmax_t argmax = 0; argmax_t arg = 0; for (index_t fy = 0; fy < m_filter_h_size; ++fy) { index_t iy = y*m_filter_h_size + fy; if ( iy < m_input_h_size ) { for (index_t fx = 0; fx < m_filter_w_size; ++fx) { index_t ix = x*m_filter_w_size + fx; if ( ix < m_input_w_size ) { FT in_sig = x_ptr.Get(frame, {c, iy, ix}); if ( in_sig > max_val ) { max_val = in_sig; argmax = arg; } arg++; } } } } y_ptr.Set(frame, {c, y, x}, max_val); argmax_ptr.Set(frame, {c, y, x}, argmax); } } } } return y_buf; } } FrameBuffer Forward_bit(FrameBuffer x_buf, bool train = true) { // 出力を設定 FrameBuffer y_buf(x_buf.GetFrameSize(), m_output_shape, DataType<FT>::type); // backwardの為に保存 if ( train ) { this->PushFrameBuffer(x_buf); } // Bit CUDA版 #ifdef BB_WITH_CUDA if ( DataType<FT>::type == BB_TYPE_BIT && !m_host_only && x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) { auto ptr_x = x_buf.LockDeviceMemoryConst(); auto ptr_y = y_buf.LockDeviceMemory(true); bbcu_bit_MaxPooling_Forward ( (int const *)ptr_x.GetAddr(), (int *)ptr_y.GetAddr(), (int 
)m_filter_h_size, (int )m_filter_w_size, (int )m_input_w_size, (int )m_input_h_size, (int )m_output_w_size, (int )m_output_h_size, (int )m_output_c_size, (int )y_buf.GetFrameSize(), (int )(y_buf.GetFrameStride() / sizeof(int)) ); return y_buf; } #endif if ( DataType<FT>::type == BB_TYPE_BIT ) { // バイナリ用実装 auto x_ptr = x_buf.LockConst<FT>(); auto y_ptr = y_buf.Lock<FT>(true); index_t m256_frame_size = (int)y_buf.GetFrameStride() / 32; #pragma omp parallel for for (index_t c = 0; c < m_input_c_size; ++c) { for (index_t y = 0; y < m_output_h_size; ++y) { for (index_t x = 0; x < m_output_w_size; ++x) { __m256i *y_addr = (__m256i *)y_ptr.GetAddr(GetOutputNode(c, y, x)); for (index_t frame = 0; frame < m256_frame_size; ++frame) { __m256i max_val = _mm256_set1_epi8(0); for (index_t fy = 0; fy < m_filter_h_size; ++fy) { index_t iy = y*m_filter_h_size + fy; if ( iy < m_input_h_size ) { for (index_t fx = 0; fx < m_filter_w_size; ++fx) { index_t ix = x*m_filter_w_size + fx; if ( ix < m_input_w_size ) { __m256i const *x_addr = (__m256i const *)x_ptr.GetAddr(GetInputNode(c, iy, ix)); __m256i in_sig = _mm256_load_si256(&x_addr[frame]); max_val = _mm256_or_si256(max_val, in_sig); } } } } _mm256_store_si256(&y_addr[frame], max_val); } } } } return y_buf; } // 汎用版実装 { auto x_ptr = x_buf.LockConst<FT>(); auto y_ptr = y_buf.Lock<FT>(true); auto frame_size = x_buf.GetFrameSize(); #pragma omp parallel for for (index_t c = 0; c < m_input_c_size; ++c) { for (index_t y = 0; y < m_output_h_size; ++y) { for (index_t x = 0; x < m_output_w_size; ++x) { for (index_t frame = 0; frame < frame_size; ++frame) { FT max_val = 0; for (index_t fy = 0; fy < m_filter_h_size; ++fy) { index_t iy = y*m_filter_h_size + fy; if ( iy < m_input_h_size ) { for (index_t fx = 0; fx < m_filter_w_size; ++fx) { index_t ix = x*m_filter_w_size + fx; if ( ix < m_input_w_size ) { FT in_sig = x_ptr.Get(frame, {c, iy, ix}); if ( in_sig ) { max_val = in_sig; } } } } } y_ptr.Set(frame, {c, y, x}, max_val); } } } } return 
y_buf; } } FrameBuffer Backward(FrameBuffer dy_buf) override { if (dy_buf.Empty()) { return dy_buf; } // bit版(maxargを残すよりx_bufを残す方が小さい) if (DataType<FT>::type == BB_TYPE_BIT && m_filter_h_size * m_filter_w_size < 32) { return Backward_bit(dy_buf); } // FrameBuffer y_buf = this->PopFrameBuffer(); // FrameBuffer x_buf = this->PopFrameBuffer(); // BB_ASSERT(x_buf.GetType() == DataType<FT>::type); // BB_ASSERT(y_buf.GetType() == DataType<FT>::type); auto input_shape = m_stack_shape.top(); m_stack_shape.pop(); FrameBuffer argmax_buf = this->PopFrameBuffer(); SetInputShape(input_shape); BB_ASSERT(dy_buf.GetType() == DataType<BT>::type); BB_ASSERT(dy_buf.GetShape() == argmax_buf.GetShape()); FrameBuffer dx_buf(dy_buf.GetFrameSize(), input_shape, DataType<BT>::type); #ifdef BB_WITH_CUDA if ( DataType<BT>::type == BB_TYPE_FP32 && DataType<FT>::type == BB_TYPE_FP32 && !m_host_only && argmax_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && dy_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) { // CUDA版 auto ptr_argmax = argmax_buf.LockDeviceMemoryConst(); auto ptr_dy = dy_buf.LockDeviceMemoryConst(); auto ptr_dx = dx_buf.LockDeviceMemory(true); bbcu_fp32_MaxPooling_Backward ( (int const *)ptr_argmax.GetAddr(), (float const *)ptr_dy.GetAddr(), (float* )ptr_dx.GetAddr(), (int )m_filter_h_size, (int )m_filter_w_size, (int )m_input_w_size, (int )m_input_h_size, (int )m_output_w_size, (int )m_output_h_size, (int )m_output_c_size, (int )dy_buf.GetFrameSize(), (int )(dy_buf.GetFrameStride() / sizeof(float)) ); return dx_buf; } #endif /* #ifdef BB_WITH_CUDA if ( DataType<FT>::type == BB_TYPE_BIT && DataType<BT>::type == BB_TYPE_FP32 && !m_host_only && x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) { // CUDA版 auto ptr_x = x_buf.LockDeviceMemoryConst(); auto ptr_y = y_buf.LockDeviceMemoryConst(); auto ptr_dy = dy_buf.LockDeviceMemoryConst(); auto ptr_dx = dx_buf.LockDeviceMemory(true); bbcu_bit_fp32_MaxPooling_Backward ( (int 
const *)ptr_x.GetAddr(), (int const *)ptr_y.GetAddr(), (float const *)ptr_dy.GetAddr(), (float* )ptr_dx.GetAddr(), (int )m_filter_h_size, (int )m_filter_w_size, (int )m_input_w_size, (int )m_input_h_size, (int )m_output_w_size, (int )m_output_h_size, (int )m_output_c_size, (int )dy_buf.GetFrameSize(), (int )(x_buf.GetFrameStride() / sizeof(int)), (int )(dy_buf.GetFrameStride() / sizeof(float)) ); return dx_buf; } #endif if ( DataType<BT>::type == BB_TYPE_FP32 && DataType<FT>::type == BB_TYPE_FP32 ) { // float用実装 index_t m256_frame_size = dx_buf.GetFrameStride() / sizeof(float); auto x_ptr = x_buf.LockConst<FT>(); auto y_ptr = y_buf.LockConst<FT>(); auto dy_ptr = dy_buf.LockConst<BT>(); auto dx_ptr = dx_buf.Lock<BT>(true); #pragma omp parallel for for (index_t n = 0; n < m_input_c_size; ++n) { for (index_t y = 0; y < m_output_h_size; ++y) { for (index_t x = 0; x < m_output_w_size; ++x) { float const * y_addr = (float const *)y_ptr.GetAddr(GetOutputNode(n, y, x)); float const * dy_addr = (float const *)dy_ptr.GetAddr(GetOutputNode(n, y, x)); for (index_t frame = 0; frame < m256_frame_size; frame += 8) { __m256 out_sig = _mm256_load_ps(&y_addr[frame]); __m256 out_grad = _mm256_load_ps(&dy_addr[frame]); for (index_t fy = 0; fy < m_filter_h_size; ++fy) { index_t iy = y*m_filter_h_size + fy; if ( iy < m_input_h_size ) { for (index_t fx = 0; fx < m_filter_w_size; ++fx) { index_t ix = x*m_filter_w_size + fx; if ( ix < m_input_w_size ) { float const *x_addr = (float const *)x_ptr.GetAddr(GetInputNode(n, iy, ix)); float *dx_addr = (float *)dx_ptr.GetAddr(GetInputNode(n, iy, ix)); __m256 in_sig = _mm256_load_ps(&x_addr[frame]); __m256 mask = _mm256_cmp_ps(in_sig, out_sig, _CMP_EQ_OQ); __m256 in_grad = _mm256_and_ps(mask, out_grad); _mm256_store_ps(&dx_addr[frame], in_grad); } } } } } } } } return dx_buf; } */ // 汎用版実装 { auto argmax_ptr = argmax_buf.LockConst<argmax_t>(); auto dy_ptr = dy_buf.LockConst<BT>(); auto dx_ptr = dx_buf.Lock<BT>(true); auto frame_size = 
dy_buf.GetFrameSize(); #pragma omp parallel for for (index_t c = 0; c < m_input_c_size; ++c) { for (index_t y = 0; y < m_output_h_size; ++y) { for (index_t x = 0; x < m_output_w_size; ++x) { for (index_t frame = 0; frame < frame_size; ++frame) { argmax_t arg = 0; argmax_t argmax = argmax_ptr.Get(frame, c, y, x); BT grad = dy_ptr.Get(frame, c, y, x); for (index_t fy = 0; fy < m_filter_h_size; ++fy) { index_t iy = y*m_filter_h_size + fy; if ( iy < m_input_h_size ) { for (index_t fx = 0; fx < m_filter_w_size; ++fx) { index_t ix = x*m_filter_w_size + fx; if ( ix < m_input_w_size ) { dx_ptr.Set(frame, c, iy, ix, (arg == argmax) ? grad : (BT)0); arg++; } } } } } } } } return dx_buf; } } FrameBuffer Backward_bit(FrameBuffer dy_buf) { FrameBuffer x_buf = this->PopFrameBuffer(); BB_ASSERT(x_buf.GetType() == DataType<FT>::type); SetInputShape(x_buf.GetShape()); BB_ASSERT(dy_buf.GetType() == DataType<BT>::type); BB_ASSERT(dy_buf.GetShape() == m_output_shape); FrameBuffer dx_buf(dy_buf.GetFrameSize(), m_input_shape, DataType<BT>::type); #ifdef BB_WITH_CUDA if ( DataType<FT>::type == BB_TYPE_BIT && DataType<BT>::type == BB_TYPE_FP32 && !m_host_only && x_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && dy_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) { // CUDA版 auto ptr_x = x_buf.LockDeviceMemoryConst(); auto ptr_dy = dy_buf.LockDeviceMemoryConst(); auto ptr_dx = dx_buf.LockDeviceMemory(true); bbcu_bit_fp32_MaxPooling_Backward ( (int const *)ptr_x.GetAddr(), (float const *)ptr_dy.GetAddr(), (float* )ptr_dx.GetAddr(), (int )m_filter_h_size, (int )m_filter_w_size, (int )m_input_w_size, (int )m_input_h_size, (int )m_output_w_size, (int )m_output_h_size, (int )m_output_c_size, (int )dy_buf.GetFrameSize(), (int )(x_buf.GetFrameStride() / sizeof(int)), (int )(dy_buf.GetFrameStride() / sizeof(float)) ); return dx_buf; } #endif // 汎用版実装 { auto x_ptr = x_buf.LockConst<FT>(); auto dy_ptr = dy_buf.LockConst<BT>(); auto dx_ptr = dx_buf.Lock<BT>(true); auto frame_size = 
dy_buf.GetFrameSize(); #pragma omp parallel for for (index_t c = 0; c < m_input_c_size; ++c) { for (index_t y = 0; y < m_output_h_size; ++y) { for (index_t x = 0; x < m_output_w_size; ++x) { for (index_t frame = 0; frame < frame_size; ++frame) { BT grad = dy_ptr.Get(frame, c, y, x); for (index_t fy = 0; fy < m_filter_h_size; ++fy) { index_t iy = y*m_filter_h_size + fy; if ( iy < m_input_h_size ) { for (index_t fx = 0; fx < m_filter_w_size; ++fx) { index_t ix = x*m_filter_w_size + fx; if ( ix < m_input_w_size ) { FT in_sig = x_ptr.Get(frame, c, iy, ix); dx_ptr.Set(frame, c, iy, ix, in_sig ? grad : (BT)0); if ( in_sig ) { grad = 0; } } } } } } } } } return dx_buf; } } // シリアライズ protected: void DumpObjectData(std::ostream &os) const override { // バージョン std::int64_t ver = 1; bb::SaveValue(os, ver); // 親クラス _super::DumpObjectData(os); // メンバ bb::SaveValue(os, m_host_only); bb::SaveValue(os, m_filter_h_size); bb::SaveValue(os, m_filter_w_size); bb::SaveValue(os, m_input_c_size); bb::SaveValue(os, m_input_h_size); bb::SaveValue(os, m_input_w_size); bb::SaveValue(os, m_output_c_size); bb::SaveValue(os, m_output_h_size); bb::SaveValue(os, m_output_w_size); } void LoadObjectData(std::istream &is) override { // バージョン std::int64_t ver; bb::LoadValue(is, ver); BB_ASSERT(ver == 1); // 親クラス _super::LoadObjectData(is); // メンバ bb::LoadValue(is, m_host_only); bb::LoadValue(is, m_filter_h_size); bb::LoadValue(is, m_filter_w_size); bb::LoadValue(is, m_input_c_size); bb::LoadValue(is, m_input_h_size); bb::LoadValue(is, m_input_w_size); bb::LoadValue(is, m_output_c_size); bb::LoadValue(is, m_output_h_size); bb::LoadValue(is, m_output_w_size); // 再構築 m_input_shape = indices_t({m_input_c_size, m_input_h_size, m_input_w_size}); m_output_shape = indices_t({m_output_c_size, m_output_h_size, m_output_w_size}); } }; }
indirectaccess3-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

// Two pointers have distance of 12 (p1 - p2 = 12).
// They are used as base addresses for indirect array accesses using an index set (another array).
//
// An index set has two indices with distance of 12 :
// indexSet[3]- indexSet[0] = 533 - 521 = 12
// So there is a loop-carried dependence for i=0 and i=3:
// xa3[521] and xa1[533] are the same element, base[533].
//
// We use the default loop scheduling (static even) in OpenMP.
// It is possible that two dependent iterations will be scheduled
// within a same chunk to a same thread. In that case there are no runtime data races.
//
// N is 180; the two iterations with i=0 and i=3 have loop-carried dependences.
// For static even scheduling, we must have at least 60 threads (180/60=3 iterations per chunk)
// so iteration 0 and 3 will be scheduled to two different threads.
//
// NOTE: this is an intentional-race benchmark ("orig-yes"); the race must be
// kept, not fixed.
//
// Liao, 3/29/2017
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define N 180

// Indirect-access index set; entry 3 was changed from 527 to 521+12=533 to
// create the aliasing with entry 0 (see comment above).
int indexSet[N] = {
521, 523, 525, 533, 529, 531, // change 527 to 521+12=533
547, 549, 551, 553, 555, 557,
573, 575, 577, 579, 581, 583,
599, 601, 603, 605, 607, 609,
625, 627, 629, 631, 633, 635,
651, 653, 655, 657, 659, 661,
859, 861, 863, 865, 867, 869,
885, 887, 889, 891, 893, 895,
911, 913, 915, 917, 919, 921,
937, 939, 941, 943, 945, 947,
963, 965, 967, 969, 971, 973,
989, 991, 993, 995, 997, 999,
1197, 1199, 1201, 1203, 1205, 1207,
1223, 1225, 1227, 1229, 1231, 1233,
1249, 1251, 1253, 1255, 1257, 1259,
1275, 1277, 1279, 1281, 1283, 1285,
1301, 1303, 1305, 1307, 1309, 1311,
1327, 1329, 1331, 1333, 1335, 1337,
1535, 1537, 1539, 1541, 1543, 1545,
1561, 1563, 1565, 1567, 1569, 1571,
1587, 1589, 1591, 1593, 1595, 1597,
1613, 1615, 1617, 1619, 1621, 1623,
1639, 1641, 1643, 1645, 1647, 1649,
1665, 1667, 1669, 1671, 1673, 1675,
1873, 1875, 1877, 1879, 1881, 1883,
1899, 1901, 1903, 1905, 1907, 1909,
1925, 1927, 1929, 1931, 1933, 1935,
1951, 1953, 1955, 1957, 1959, 1961,
1977, 1979, 1981, 1983, 1985, 1987,
2003, 2005, 2007, 2009, 2011, 2013};

int main (int argc, char* argv[])
{
  // allocate enough room for the largest index (2013) plus the +12 offset
  double * base = (double*) malloc(sizeof(double)* (2013+12+1));
  if (base == 0)
  {
    printf ("Error in malloc(). Aborting ...\n");
    return 1;
  }

  // two base pointers 12 elements apart: xa3[i] aliases xa1[i+12]
  double * xa1 = base;
  double * xa3 = xa1 + 12;
  int i;

  // initialize segments touched by indexSet
  for (i =521; i<= 2025; ++i)
  {
    base[i]=0.5*i;
  }

#pragma omp parallel for // default static even scheduling may not trigger data race!
  for (i =0; i< N; ++i)
  {
    int idx = indexSet[i];
    xa1[idx]+= 1.0;   // iteration 3 writes base[533] here ...
    xa3[idx]+= 3.0;   // ... while iteration 0 writes base[521+12] = base[533] here
  }

  printf("x1[999]=%f xa3[1285]=%f\n", xa1[999], xa3[1285]);

  free (base);
  return 0;
}
utils.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file utils.h * \brief Basic utilility functions. */ #ifndef MXNET_COMMON_UTILS_H_ #define MXNET_COMMON_UTILS_H_ #include <dmlc/logging.h> #include <dmlc/omp.h> #include <mxnet/engine.h> #include <mxnet/ndarray.h> #include <mxnet/op_attr_types.h> #include <mxnet/graph_attr_types.h> #include <nnvm/graph_attr_types.h> #include <memory> #include <vector> #include <type_traits> #include <utility> #include <random> #include <string> #include <thread> #include <algorithm> #include <functional> namespace mxnet { namespace common { template<typename xpu> void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output); /* * \brief setup default-storage tblobs from source NDArrays. If any source NDArray has non-default * storage, it creates a temp NDArray with default storage and uses the temp tblob. The * function also records the indices of non-default source NDArrays and the indices of * their corresponding temporary NDArrays in the temp array. 
* \param src list of source NDArray * \param blobs list of tblobs to return * \param temp_src list of source NDArrays which requires temporary default storage representation * \param temp_dst list of temporary destination NDArrays for default storage representation * \param idx_map mapping from indices in source NDArrays to indices in temp_dst. When not set, indices are not recorded * \return true if any source NDArray need to cast storage */ inline bool SetupDefaultBlobs(const std::vector<NDArray>& src, std::vector<TBlob> *blobs, std::vector<NDArray> *temp_src, std::vector<NDArray> *temp_dst, std::unordered_map<uint32_t, uint32_t> *idx_map = nullptr) { bool require_cast = false; for (size_t i = 0; i < src.size(); i++) { auto& nd = src[i]; if (nd.storage_type() != kDefaultStorage) { if (idx_map != nullptr) { (*idx_map)[i] = temp_dst->size(); } NDArray temp(nd.shape(), nd.ctx(), false, nd.dtype()); temp_src->emplace_back(nd); temp_dst->emplace_back(temp); blobs->emplace_back(temp.data()); require_cast = true; } else { blobs->push_back(nd.data()); } } return require_cast; } /* * \brief cast the NDArrays in `src` and store the result in NDArrays in `dst`. * This is only used for storage fallback in executor. * When storage_fallback is false, and `MXNET_EXEC_STORAGE_FALLBACK` == 0, * storage fallback is disallowed. * \param src list of source NDArray to cast * \param dst list of destionation NDArray which hold the result of cast_storage operation * \param ctx operator context for cast_storage operation * \param storage_fallback whether storage_fallback is allowed. When set to false, * its value depends on `MXNET_EXEC_STORAGE_FALLBACK`. 
*/ template <typename xpu> inline void CastNonDefaultStorage(const std::vector<NDArray>& src, const std::vector<NDArray>& dst, const OpContext& ctx, bool storage_fallback = false) { CHECK_GE(dst.size(), src.size()); if (src.size() == 0) return; if (storage_fallback == false) { storage_fallback = dmlc::GetEnv("MXNET_EXEC_STORAGE_FALLBACK", true); } if (storage_fallback == false) { LOG(FATAL) << "Storage type conversion detected during execution. " << "You are probably executing an operator which " << "doesn't support NDArray inputs with non-default storage."; } for (size_t i = 0; i < src.size(); i++) { CastStorageDispatch<xpu>(ctx, src[i], dst[i]); } } // Check if any storage type is not default storage inline bool ContainsNonDefaultStorage(const StorageTypeVector& vstorage) { for (const auto& i : vstorage) { if (i != kUndefinedStorage && i != kDefaultStorage) return true; } return false; } // Check if any NDArray in the list has default storage inline bool ContainsDefaultStorage(const std::vector<NDArray>& ndarrays) { for (const auto &nd : ndarrays) { if (nd.storage_type() == kDefaultStorage) { return true; } } return false; } inline bool ContainsNonDefaultStorage(const std::vector<NDArray>& ndarrays) { for (const auto &nd : ndarrays) { if (nd.storage_type() != kUndefinedStorage && nd.storage_type() != kDefaultStorage) { return true; } } return false; } inline bool ContainsStorage(const std::vector<NDArray>& ndarrays, const NDArrayStorageType stype) { for (const auto &nd : ndarrays) { if (nd.storage_type() == stype) { return true; } } return false; } inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays, const NDArrayStorageType stype) { if (!ndarrays.empty()) { for (const auto &nd : ndarrays) { if (nd.storage_type() != stype) { return false; } } return true; } return false; } /*! 
 \brief get string representation of storage_type
 */
inline std::string stype_string(const int x) {
  switch (x) {
    case kDefaultStorage:
      return "default";
    case kCSRStorage:
      return "csr";
    case kRowSparseStorage:
      return "row_sparse";
  }
  return "unknown";
}

// heuristic to determine number of threads per GPU
inline int GetNumThreadPerGPU() {
  // This is resource efficient option.
  return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2);
}

// heuristic to get number of matching colors.
// this decides how much parallelism we can get in each GPU.
inline int GetExecNumMatchColor() {
  // This is resource efficient option.
  int num_match_color = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1);
  // Never exceed the per-GPU worker-thread count.
  return std::min(num_match_color, GetNumThreadPerGPU());
}

/*!
 * \brief OpenMP-parallel sum of the first n elements of a, starting from `start`.
 */
template<typename T, typename V>
V ParallelAccumulate(const T* a, const int n, V start) {
  V sum = start;
#pragma omp parallel for reduction(+:sum)
  for (int i = 0; i < n; ++i) {
    sum += a[i];
  }
  return sum;
}

/*!
 * \brief
 * Helper function for ParallelSort.
 * DO NOT call this function directly.
 * Use the interface ParallelSort instead.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt, typename Compare>
void ParallelSortHelper(RandomIt first, size_t len,
                        size_t grainsize, const Compare& comp) {
  if (len < grainsize) {
    // Below the grain size: plain sequential sort.
    std::sort(first, first+len, comp);
  } else {
    // Sort the left half on a new thread, the right half on this one, then merge.
    std::thread thr(ParallelSortHelper<RandomIt, Compare>, first, len/2, grainsize, comp);
    ParallelSortHelper(first+len/2, len - len/2, grainsize, comp);
    thr.join();
    std::inplace_merge(first, first+len/2, first+len, comp);
  }
}

/*!
 * \brief
 * Sort the elements in the range [first, last) into the ascending order defined by
 * the comparator comp.
 * If the length of the range [first, last) is greater than a certain threshold,
 * the range will be recursively divided into two and assign two threads
 * to sort each half range.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt, typename Compare>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) {
  const auto num = std::distance(first, last);
  // Grain size is floored at 16K elements so tiny ranges stay single-threaded.
  size_t grainsize = std::max(num / num_threads + 5, static_cast<size_t>(1024*16));
  ParallelSortHelper(first, num, grainsize, comp);
}

/*!
 * \brief
 * Sort the elements in the range [first, last) into ascending order.
 * The elements are compared using the default < operator.
 * If the length of the range [first, last) is greater than a certain threshold,
 * the range will be recursively divided into two and assign two threads
 * to sort each half range.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) {
  ParallelSort(first, last, num_threads,
               std::less<typename std::iterator_traits<RandomIt>::value_type>());
}

/*!
 * \brief Random Engine
 */
typedef std::mt19937 RANDOM_ENGINE;

/*!
 * \brief Helper functions.
 */
namespace helper {

/*!
 * \brief Helper for non-array type `T`.
 */
template <class T>
struct UniqueIf {
  /*!
   * \brief Type of `T`.
   */
  using SingleObject = std::unique_ptr<T>;
};

/*!
 * \brief Helper for an array of unknown bound `T`.
 */
template <class T>
struct UniqueIf<T[]> {
  /*!
   * \brief Type of `T`.
   */
  using UnknownBound = std::unique_ptr<T[]>;
};

/*!
 * \brief Helper for an array of known bound `T`.
 */
template <class T, size_t kSize>
struct UniqueIf<T[kSize]> {
  /*!
   * \brief Type of `T`.
   */
  using KnownBound = void;
};

}  // namespace helper

/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std``::``unique_ptr`.
 * \param args List of arguments with which an instance of `T` will be
 *             constructed.
 * \return `std``::``unique_ptr` of an instance of type `T`.
 *
 * Constructs a non-array type `T`. The arguments `args` are passed to the
 * constructor of `T`.
 * The function does not participate in the overload
 * resolution if `T` is an array type.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::SingleObject MakeUnique(Args&&... args) {
  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}

/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std``::``unique_ptr`.
 * \param n The size of the array to construct.
 * \return `std``::``unique_ptr` of an instance of type `T`.
 *
 * Constructs an array of unknown bound `T`. The function does not participate
 * in the overload resolution unless `T` is an array of unknown bound.
 */
template <class T>
typename helper::UniqueIf<T>::UnknownBound MakeUnique(size_t n) {
  using U = typename std::remove_extent<T>::type;
  // Value-initialize the array elements (the trailing {}).
  return std::unique_ptr<T>(new U[n]{});
}

/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std``::``unique_ptr`.
 * \param args List of arguments with which an instance of `T` will be
 *             constructed.
 *
 * Constructing arrays of known bound is disallowed.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... args) = delete;

/*!
 * \brief Look up the FCompute registered for `op` under `name`, selecting the
 *        <cpu> or <gpu> registry entry based on the context's device mask.
 *        LOG(FATAL) aborts on an unknown device mask; the trailing return
 *        only silences compiler warnings.
 */
template<typename FCompType>
FCompType GetFCompute(const nnvm::Op* op, const std::string& name,
                      const Context& ctx) {
  static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>");
  static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>");

  if (ctx.dev_mask() == cpu::kDevMask) {
    return fcompute_cpu.get(op, nullptr);
  } else if (ctx.dev_mask() == gpu::kDevMask) {
    return fcompute_gpu.get(op, nullptr);
  } else {
    LOG(FATAL) << "Unknown device mask";
    return nullptr;
  }
}

}  // namespace common
}  // namespace mxnet
#endif  // MXNET_COMMON_UTILS_H_
480d1a_prot_ac_so4.c
#define _POSIX_C_SOURCE 200809L #define START_TIMER(S) \ struct timeval start_##S, end_##S; \ gettimeofday(&start_##S, NULL); #define STOP_TIMER(S, T) \ gettimeofday(&end_##S, NULL); \ T->S += (double)(end_##S.tv_sec - start_##S.tv_sec) + (double)(end_##S.tv_usec - start_##S.tv_usec) / 1000000; #include "stdlib.h" #include "math.h" #include "sys/time.h" #include "xmmintrin.h" #include "pmmintrin.h" #include "omp.h" struct dataobj { void *restrict data; int *size; int *npsize; int *dsize; int *hsize; int *hofs; int *oofs; }; struct profiler { double section0; double section1; }; void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads); int Forward(struct dataobj *restrict damp_vec, const float dt, const float o_x, const float o_y, const float o_z, struct dataobj *restrict src_vec, struct dataobj *restrict src_coords_vec, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int p_src_M, const int p_src_m, const int time_M, const int time_m, const int x0_blk0_size, const int y0_blk0_size, const int nthreads, const int nthreads_nonaffine, struct profiler *timers) { float(*restrict src)[src_vec->size[1]] __attribute__((aligned(64))) = (float(*)[src_vec->size[1]])src_vec->data; float(*restrict src_coords)[src_coords_vec->size[1]] __attribute__((aligned(64))) = (float(*)[src_coords_vec->size[1]])src_coords_vec->data; float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data; float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = 
      (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;

  /* Flush denormal numbers to zero in hardware */
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);

  /* t0/t1/t2 rotate through the 3 time slices of u (current / previous / next). */
  for (int time = time_m, t0 = (time) % (3), t1 = (time + 2) % (3), t2 = (time + 1) % (3);
       time <= time_M;
       time += 1, t0 = (time) % (3), t1 = (time + 2) % (3), t2 = (time + 1) % (3))
  {
    /* Begin section0 */
    START_TIMER(section0)
    /* Four bf0 calls cover the full x/y domain: the block-aligned interior
     * plus the x-remainder, y-remainder, and corner strips. */
    bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, x0_blk0_size,
        x_M - (x_M - x_m + 1) % (x0_blk0_size), x_m, y0_blk0_size,
        y_M - (y_M - y_m + 1) % (y0_blk0_size), y_m, z_M, z_m, nthreads);
    bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, x0_blk0_size,
        x_M - (x_M - x_m + 1) % (x0_blk0_size), x_m, (y_M - y_m + 1) % (y0_blk0_size), y_M,
        y_M - (y_M - y_m + 1) % (y0_blk0_size) + 1, z_M, z_m, nthreads);
    bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, (x_M - x_m + 1) % (x0_blk0_size), x_M,
        x_M - (x_M - x_m + 1) % (x0_blk0_size) + 1, y0_blk0_size,
        y_M - (y_M - y_m + 1) % (y0_blk0_size), y_m, z_M, z_m, nthreads);
    bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, (x_M - x_m + 1) % (x0_blk0_size), x_M,
        x_M - (x_M - x_m + 1) % (x0_blk0_size) + 1, (y_M - y_m + 1) % (y0_blk0_size), y_M,
        y_M - (y_M - y_m + 1) % (y0_blk0_size) + 1, z_M, z_m, nthreads);
    STOP_TIMER(section0, timers)
    /* End section0 */

    /* Begin section1 */
    START_TIMER(section1)
#pragma omp parallel num_threads(nthreads_nonaffine)
    {
      int chunk_size = (int)(fmax(1, (1.0F / 3.0F) * (p_src_M - p_src_m + 1) / nthreads_nonaffine));
#pragma omp for collapse(1) schedule(dynamic, chunk_size)
      for (int p_src = p_src_m; p_src <= p_src_M; p_src += 1)
      {
        /* Source position relative to the grid origin; 6.66667e-2 = 1/15,
         * presumably the inverse grid spacing (TODO confirm in the generator). */
        float posx = -o_x + src_coords[p_src][0];
        float posy = -o_y + src_coords[p_src][1];
        float posz = -o_z + src_coords[p_src][2];
        /* ii_src_0..5: the two bracketing grid indices per axis for
         * trilinear injection into the 8 surrounding grid points. */
        int ii_src_0 = (int)(floor(6.66667e-2 * posx));
        int ii_src_1 = (int)(floor(6.66667e-2 * posy));
        int ii_src_2 = (int)(floor(6.66667e-2 * posz));
        int ii_src_3 = (int)(floor(6.66667e-2 * posz)) + 1;
        int ii_src_4 = (int)(floor(6.66667e-2 * posy)) + 1;
        int
            ii_src_5 = (int)(floor(6.66667e-2 * posx)) + 1;
        /* Fractional offsets of the source inside its host cell (1.5e+1 = grid spacing 15). */
        float px = (float)(posx - 1.5e+1F * (int)(floor(6.66667e-2F * posx)));
        float py = (float)(posy - 1.5e+1F * (int)(floor(6.66667e-2F * posy)));
        float pz = (float)(posz - 1.5e+1F * (int)(floor(6.66667e-2F * posz)));
        /* Each guarded block below injects the weighted source sample into one
         * of the 8 corners of the enclosing cell; the atomic update guards
         * against two sources sharing a grid point across threads. */
        if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1)
        {
          float r0 = (dt * dt) * (vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_2 + 4] * vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_2 + 4]) * (-2.96296e-4F * px * py * pz + 4.44445e-3F * px * py + 4.44445e-3F * px * pz - 6.66667e-2F * px + 4.44445e-3F * py * pz - 6.66667e-2F * py - 6.66667e-2F * pz + 1) * src[time][p_src];
#pragma omp atomic update
          u[t2][ii_src_0 + 4][ii_src_1 + 4][ii_src_2 + 4] += r0;
        }
        if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1)
        {
          float r1 = (dt * dt) * (vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_3 + 4] * vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_3 + 4]) * (2.96296e-4F * px * py * pz - 4.44445e-3F * px * pz - 4.44445e-3F * py * pz + 6.66667e-2F * pz) * src[time][p_src];
#pragma omp atomic update
          u[t2][ii_src_0 + 4][ii_src_1 + 4][ii_src_3 + 4] += r1;
        }
        if (ii_src_0 >= x_m - 1 && ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1)
        {
          float r2 = (dt * dt) * (vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_2 + 4] * vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_2 + 4]) * (2.96296e-4F * px * py * pz - 4.44445e-3F * px * py - 4.44445e-3F * py * pz + 6.66667e-2F * py) * src[time][p_src];
#pragma omp atomic update
          u[t2][ii_src_0 + 4][ii_src_4 + 4][ii_src_2 + 4] += r2;
        }
        if (ii_src_0 >= x_m - 1 && ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1)
        {
          float r3 = (dt * dt) * (vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_3 + 4] * vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_3 + 4]) * (-2.96296e-4F * px * py * pz +
                                                                                                                                  4.44445e-3F * py * pz) * src[time][p_src];
#pragma omp atomic update
          u[t2][ii_src_0 + 4][ii_src_4 + 4][ii_src_3 + 4] += r3;
        }
        /* Remaining four corners (the x-upper neighbour ii_src_5 side). */
        if (ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1 && ii_src_5 <= x_M + 1)
        {
          float r4 = (dt * dt) * (vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_2 + 4] * vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_2 + 4]) * (2.96296e-4F * px * py * pz - 4.44445e-3F * px * py - 4.44445e-3F * px * pz + 6.66667e-2F * px) * src[time][p_src];
#pragma omp atomic update
          u[t2][ii_src_5 + 4][ii_src_1 + 4][ii_src_2 + 4] += r4;
        }
        if (ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1 && ii_src_5 <= x_M + 1)
        {
          float r5 = (dt * dt) * (vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_3 + 4] * vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_3 + 4]) * (-2.96296e-4F * px * py * pz + 4.44445e-3F * px * pz) * src[time][p_src];
#pragma omp atomic update
          u[t2][ii_src_5 + 4][ii_src_1 + 4][ii_src_3 + 4] += r5;
        }
        if (ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
        {
          float r6 = (dt * dt) * (vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_2 + 4] * vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_2 + 4]) * (-2.96296e-4F * px * py * pz + 4.44445e-3F * px * py) * src[time][p_src];
#pragma omp atomic update
          u[t2][ii_src_5 + 4][ii_src_4 + 4][ii_src_2 + 4] += r6;
        }
        if (ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
        {
          float r7 = 2.96296e-4F * px * py * pz * (dt * dt) * (vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_3 + 4] * vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_3 + 4]) * src[time][p_src];
#pragma omp atomic update
          u[t2][ii_src_5 + 4][ii_src_4 + 4][ii_src_3 + 4] += r7;
        }
      }
    }
    STOP_TIMER(section1, timers)
    /* End section1 */
  }

  return 0;
}

/* Blocked stencil kernel: updates u[t2] over the tile
 * [x_m,x_M] x [y_m,y_M] x [z_m,z_M] with x/y cache blocking. */
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict
         vp_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m,
         const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads)
{
  float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) =
      (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data;
  float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) =
      (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
  float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) =
      (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;

  /* Remainder strips can be empty; bail out before dividing the domain. */
  if (x0_blk0_size == 0 || y0_blk0_size == 0)
  {
    return;
  }
#pragma omp parallel num_threads(nthreads)
  {
#pragma omp for collapse(2) schedule(dynamic, 1)
    for (int x0_blk0 = x_m; x0_blk0 <= x_M; x0_blk0 += x0_blk0_size)
    {
      for (int y0_blk0 = y_m; y0_blk0 <= y_M; y0_blk0 += y0_blk0_size)
      {
        for (int x = x0_blk0; x <= x0_blk0 + x0_blk0_size - 1; x += 1)
        {
          for (int y = y0_blk0; y <= y0_blk0 + y0_blk0_size - 1; y += 1)
          {
#pragma omp simd aligned(damp, u, vp : 32)
            for (int z = z_m; z <= z_M; z += 1)
            {
              /* r15 = 1/dt, r14 = 1/dt^2, r13 = 1/vp^2 (loop-invariant per point).
               * The +4 index offsets skip the halo layers; damp uses a +1 halo.
               * The update combines the 2nd-order time derivative, the damping
               * term, and a 4th-order-looking spatial Laplacian (coefficients
               * generated; do not hand-edit). */
              float r15 = 1.0 / dt;
              float r14 = 1.0 / (dt * dt);
              float r13 = 1.0 / (vp[x + 4][y + 4][z + 4] * vp[x + 4][y + 4][z + 4]);
              u[t2][x + 4][y + 4][z + 4] = (r13 * (-r14 * (-2.0F * u[t0][x + 4][y + 4][z + 4] + u[t1][x + 4][y + 4][z + 4])) + r15 * (damp[x + 1][y + 1][z + 1] * u[t0][x + 4][y + 4][z + 4]) - 3.70370379e-4F * (u[t0][x + 2][y + 4][z + 4] + u[t0][x + 4][y + 2][z + 4] + u[t0][x + 4][y + 4][z + 2] + u[t0][x + 4][y + 4][z + 6] + u[t0][x + 4][y + 6][z + 4] + u[t0][x + 6][y + 4][z + 4]) + 5.92592607e-3F * (u[t0][x + 3][y + 4][z + 4] + u[t0][x + 4][y + 3][z + 4] + u[t0][x + 4][y + 4][z + 3] + u[t0][x + 4][y + 4][z + 5] + u[t0][x + 4][y + 5][z + 4] + u[t0][x + 5][y + 4][z + 4]) - 3.33333341e-2F * u[t0][x + 4][y + 4][z + 4]) / (r13 * r14 + r15 * damp[x + 1][y + 1][z + 1]);
            }
          }
        }
      }
    }
  }
}
bli_dotv_bgq_int.c
/*

   BLIS
   An object-based framework for developing high-performance BLAS-like
   libraries.

   Copyright (C) 2014, The University of Texas at Austin

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:
    - Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    - Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    - Neither the name of The University of Texas at Austin nor the names
      of its contributors may be used to endorse or promote products
      derived derived from this software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

*/

#include "blis.h"

/* Double-precision dot product rho = x^T y for Blue Gene/Q (QPX intrinsics).
 * conjx/conjy are accepted for interface uniformity; for real doubles
 * conjugation is a no-op. Falls back to the reference kernel when the
 * operands do not meet the alignment/stride requirements. */
void bli_ddotv_bgq_int
     (
       conj_t  conjx,
       conj_t  conjy,
       dim_t   n,
       double* restrict x, inc_t incx,
       double* restrict y, inc_t incy,
       double* restrict rho,
       cntx_t* cntx
     )
{
	bool_t use_ref = FALSE;

	// If the vector lengths are zero, set rho to zero and return.
	if ( bli_zero_dim1( n ) )
	{
		PASTEMAC(d,set0s)( rho );
		return;
	}

	// If there is anything that would interfere with our use of aligned
	// vector loads/stores, call the reference implementation.
	if ( incx != 1 || incy != 1 ||
	     bli_is_unaligned_to( x, 32 ) ||
	     bli_is_unaligned_to( y, 32 ) ) use_ref = TRUE;

	// Call the reference implementation if needed.
	if ( use_ref )
	{
		BLIS_DDOTV_KERNEL_REF( conjx, conjy, n, x, incx, y, incy, rho, cntx );
		return;
	}

	// Process the vectors in 4-wide QPX chunks; the tail is handled scalar.
	dim_t n_run  = n / 4;
	dim_t n_left = n % 4;

	double rhos = 0.0;

	// Each thread accumulates a private 4-lane partial product over a
	// round-robin slice of the chunks; the reduction sums the lane totals.
	#pragma omp parallel reduction(+:rhos)
	{
		dim_t n_threads;
		dim_t t_id = omp_get_thread_num();
		n_threads  = omp_get_num_threads();

		vector4double rhov = vec_splats( 0.0 );
		vector4double xv, yv;

		for ( dim_t i = t_id; i < n_run; i += n_threads )
		{
			// Aligned 32-byte loads (guaranteed by the guard above).
			xv = vec_lda( 0 * sizeof(double), &x[i*4] );
			yv = vec_lda( 0 * sizeof(double), &y[i*4] );
			rhov = vec_madd( xv, yv, rhov );
		}

		// Horizontal sum of the 4 vector lanes into the scalar accumulator.
		rhos += vec_extract( rhov, 0 );
		rhos += vec_extract( rhov, 1 );
		rhos += vec_extract( rhov, 2 );
		rhos += vec_extract( rhov, 3 );
	}

	// Scalar cleanup for the final n % 4 elements.
	for ( dim_t i = 0; i < n_left; i++ )
	{
		rhos += x[4*n_run + i] * y[4*n_run + i];
	}

	*rho = rhos;
}
J2OrbitalSoA.h
////////////////////////////////////////////////////////////////////////////////////// // This file is distributed under the University of Illinois/NCSA Open Source License. // See LICENSE file in top directory for details. // // Copyright (c) 2016 Jeongnim Kim and QMCPACK developers. // // File developed by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp. // Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp. // Ye Luo, yeluo@anl.gov, Argonne National Laboratory // // File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp. ////////////////////////////////////////////////////////////////////////////////////// // -*- C++ -*- #ifndef QMCPLUSPLUS_TWOBODYJASTROW_OPTIMIZED_SOA_H #define QMCPLUSPLUS_TWOBODYJASTROW_OPTIMIZED_SOA_H #include <map> #include <numeric> #include "Configuration.h" #if !defined(QMC_BUILD_SANDBOX_ONLY) #include "QMCWaveFunctions/WaveFunctionComponent.h" #include "QMCWaveFunctions/Jastrow/DiffTwoBodyJastrowOrbital.h" #endif #include "Particle/DistanceTableData.h" #include "LongRange/StructFact.h" #include <CPU/SIMD/aligned_allocator.hpp> #include <CPU/SIMD/algorithm.hpp> namespace qmcplusplus { // helper class to activate KEcorr during optimizing Jastrow template<typename RT, class FT> class J2KECorrection { size_t num_groups_; std::vector<size_t> num_elec_in_groups_; RT num_elecs_; RT vol; RT G0mag; const std::vector<FT*>& F_; bool SK_enabled; public: J2KECorrection(const ParticleSet& targetPtcl, const std::vector<FT*>& F) : num_groups_(targetPtcl.groups()), num_elecs_(targetPtcl.getTotalNum()), vol(targetPtcl.Lattice.Volume), F_(F), SK_enabled(targetPtcl.SK != nullptr) { // compute num_elec_in_groups_ num_elec_in_groups_.reserve(3); for (int i = 0; i < num_groups_; i++) num_elec_in_groups_.push_back(targetPtcl.last(i) - targetPtcl.first(i)); if (SK_enabled) G0mag = std::sqrt(targetPtcl.SK->KLists.ksq[0]); } RT computeKEcorr() { if (!SK_enabled) return 0; const int numPoints = 1000; RT uk = 0.0; RT a = 1.0; for (int i = 0; i < 
                        num_groups_; i++)
    {
      int Ni = num_elec_in_groups_[i];
      for (int j = 0; j < num_groups_; j++)
      {
        int Nj = num_elec_in_groups_[j];
        if (F_[i * num_groups_ + j])
        {
          FT& ufunc = *(F_[i * num_groups_ + j]);
          RT radius = ufunc.cutoff_radius;
          RT k      = G0mag;
          RT dr     = radius / (RT)(numPoints - 1);
          for (int ir = 0; ir < numPoints; ir++)
          {
            RT r = dr * (RT)ir;
            RT u = ufunc.evaluate(r);
            uk += 0.5 * 4.0 * M_PI * r * std::sin(k * r) / k * u * dr * (RT)Nj / (RT)(Ni + Nj);
          }
        }
      }
    }
    // Fixed-point iteration (20 steps) for the correction parameter a.
    for (int iter = 0; iter < 20; iter++)
      a = uk / (4.0 * M_PI * (1.0 / (G0mag * G0mag) - 1.0 / (G0mag * G0mag + 1.0 / a)));
    return 4.0 * M_PI * a / (4.0 * vol) * num_elecs_;
  }
};

/** @ingroup WaveFunctionComponent
 *  @brief Specialization for two-body Jastrow function using multiple functors
 *
 * Each pair-type can have distinct function \f$u(r_{ij})\f$.
 * For electrons, distinct pair correlation functions are used
 * for spins up-up/down-down and up-down/down-up.
 *
 * Based on J2OrbitalSoA.h with these considerations
 * - DistanceTableData using SoA containers
 * - support mixed precision: FT::real_type != OHMMS_PRECISION
 * - loops over the groups: elminated PairID
 * - support simd function
 * - double the loop counts
 * - Memory use is O(N).
 */
template<class FT>
class J2OrbitalSoA : public WaveFunctionComponent
{
public:
  ///alias FuncType
  using FuncType = FT;
  ///type of each component U, dU, d2U;
  using valT = typename FT::real_type;
  ///element position type
  using posT = TinyVector<valT, OHMMS_DIM>;
  ///use the same container
  using DistRow  = DistanceTableData::DistRow;
  using DisplRow = DistanceTableData::DisplRow;
  using gContainer_type = VectorSoaContainer<valT, OHMMS_DIM>;

  // Ye: leaving this public is bad but currently used by unit tests.
  ///Container for \f$F[ig*NumGroups+jg]\f$.
  std::vector<FT*> F;

protected:
  ///number of particles
  size_t N;
  ///number of particles + padded
  size_t N_padded;
  ///number of groups of the target particleset
  size_t NumGroups;
  ///diff value
  RealType DiffVal;
  ///Correction
  RealType KEcorr;
  ///\f$Uat[i] = sum_(j) u_{i,j}\f$
  Vector<valT> Uat;
  ///\f$dUat[i] = sum_(j) du_{i,j}\f$
  gContainer_type dUat;
  ///\f$d2Uat[i] = sum_(j) d2u_{i,j}\f$
  Vector<valT> d2Uat;
  /// value for the particle currently being moved
  valT cur_Uat;
  /// per-move scratch: u, du, d2u rows for the active particle
  aligned_vector<valT> cur_u, cur_du, cur_d2u;
  /// per-move scratch: previous-position u, du, d2u rows
  aligned_vector<valT> old_u, old_du, old_d2u;
  /// scratch used by the functors' batched evaluators
  aligned_vector<valT> DistCompressed;
  aligned_vector<int> DistIndice;
  ///Unique J2 set for cleanup
  std::map<std::string, FT*> J2Unique;
  /// e-e table ID
  const int my_table_ID_;
  // helper for compute J2 Chiesa KE correction
  J2KECorrection<RealType, FT> j2_ke_corr_helper;

public:
  J2OrbitalSoA(ParticleSet& p, int tid);
  J2OrbitalSoA(const J2OrbitalSoA& rhs) = delete;
  ~J2OrbitalSoA();

  /* initialize storage */
  void init(ParticleSet& p);

  /** add functor for (ia,ib) pair */
  void addFunc(int ia, int ib, FT* j);

  void resetTargetParticleSet(ParticleSet& P)
  {
    if (dPsi)
      dPsi->resetTargetParticleSet(P);
  }

  /** check in an optimizable parameter
   * @param active a super set of optimizable variables
   */
  void checkInVariables(opt_variables_type& active)
  {
    myVars.clear();
    typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end());
    while (it != it_end)
    {
      (*it).second->checkInVariables(active);
      (*it).second->checkInVariables(myVars);
      ++it;
    }
  }

  /** check out optimizable variables */
  void checkOutVariables(const opt_variables_type& active)
  {
    myVars.getIndex(active);
    Optimizable = myVars.is_optimizable();
    typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end());
    while (it != it_end)
    {
      (*it).second->checkOutVariables(active);
      ++it;
    }
    if (dPsi)
      dPsi->checkOutVariables(active);
  }

  ///reset the value of all the unique Two-Body Jastrow functions
  void resetParameters(const opt_variables_type& active)
  {
    if (!Optimizable)
      return;
    typename
             std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end());
    while (it != it_end)
    {
      (*it).second->resetParameters(active);
      ++it;
    }
    if (dPsi)
      dPsi->resetParameters(active);
    // Pull the active optimizer values back into this component's variable set.
    for (int i = 0; i < myVars.size(); ++i)
    {
      int ii = myVars.Index[i];
      if (ii >= 0)
        myVars[i] = active[ii];
    }
  }

  void finalizeOptimization() { KEcorr = j2_ke_corr_helper.computeKEcorr(); }

  /** print the state, e.g., optimizables */
  void reportStatus(std::ostream& os)
  {
    typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end());
    while (it != it_end)
    {
      (*it).second->myVars.print(os);
      ++it;
    }
  }

  WaveFunctionComponentPtr makeClone(ParticleSet& tqp) const;

  LogValueType evaluateLog(ParticleSet& P, ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L);
  void evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi);

  /** recompute internal data assuming distance table is fully ready */
  void recompute(ParticleSet& P);

  PsiValueType ratio(ParticleSet& P, int iat);
  /// ratio of Jastrow values for each virtual-particle position of VP
  void evaluateRatios(const VirtualParticleSet& VP, std::vector<ValueType>& ratios)
  {
    for (int k = 0; k < ratios.size(); ++k)
      ratios[k] =
          std::exp(Uat[VP.refPtcl] - computeU(VP.refPS, VP.refPtcl, VP.getDistTable(my_table_ID_).getDistRow(k)));
  }
  void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios);
  GradType evalGrad(ParticleSet& P, int iat);
  PsiValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat);
  void acceptMove(ParticleSet& P, int iat, bool safe_to_delay = false);
  inline void restore(int iat) {}

  /** compute G and L after the sweep */
  void evaluateGL(ParticleSet& P,
                  ParticleSet::ParticleGradient_t& G,
                  ParticleSet::ParticleLaplacian_t& L,
                  bool fromscratch = false);

  /// first registration claims a region of the walker buffer; later calls skip over it
  inline void registerData(ParticleSet& P, WFBufferType& buf)
  {
    if (Bytes_in_WFBuffer == 0)
    {
      Bytes_in_WFBuffer = buf.current();
      buf.add(Uat.begin(), Uat.end());
      buf.add(dUat.data(), dUat.end());
      buf.add(d2Uat.begin(), d2Uat.end());
      Bytes_in_WFBuffer = buf.current() - Bytes_in_WFBuffer;
// free local space Uat.free(); dUat.free(); d2Uat.free(); } else { buf.forward(Bytes_in_WFBuffer); } } inline void copyFromBuffer(ParticleSet& P, WFBufferType& buf) { Uat.attachReference(buf.lendReference<valT>(N), N); dUat.attachReference(N, N_padded, buf.lendReference<valT>(N_padded * OHMMS_DIM)); d2Uat.attachReference(buf.lendReference<valT>(N), N); } LogValueType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false) { evaluateGL(P, P.G, P.L, false); buf.forward(Bytes_in_WFBuffer); return LogValue; } /*@{ internal compute engines*/ inline valT computeU(const ParticleSet& P, int iat, const DistRow& dist) { valT curUat(0); const int igt = P.GroupID[iat] * NumGroups; for (int jg = 0; jg < NumGroups; ++jg) { const FuncType& f2(*F[igt + jg]); int iStart = P.first(jg); int iEnd = P.last(jg); curUat += f2.evaluateV(iat, iStart, iEnd, dist.data(), DistCompressed.data()); } return curUat; } inline void computeU3(const ParticleSet& P, int iat, const DistRow& dist, RealType* restrict u, RealType* restrict du, RealType* restrict d2u, bool triangle = false); /** compute gradient */ inline posT accumulateG(const valT* restrict du, const DisplRow& displ) const { posT grad; for (int idim = 0; idim < OHMMS_DIM; ++idim) { const valT* restrict dX = displ.data(idim); valT s = valT(); #pragma omp simd reduction(+ : s) aligned(du, dX) for (int jat = 0; jat < N; ++jat) s += du[jat] * dX[jat]; grad[idim] = s; } return grad; } /**@} */ RealType ChiesaKEcorrection() { return KEcorr = j2_ke_corr_helper.computeKEcorr(); } RealType KECorrection() { return KEcorr; } }; template<typename FT> J2OrbitalSoA<FT>::J2OrbitalSoA(ParticleSet& p, int tid) : my_table_ID_(p.addTable(p, DT_SOA)), j2_ke_corr_helper(p, F) { init(p); KEcorr = 0.0; ClassName = "J2OrbitalSoA"; } template<typename FT> J2OrbitalSoA<FT>::~J2OrbitalSoA() { auto it = J2Unique.begin(); while (it != J2Unique.end()) { delete ((*it).second); ++it; } } //need to clean up J2Unique template<typename FT> void 
J2OrbitalSoA<FT>::init(ParticleSet& p)
{
  // cache particle count, the padded SoA row length, and species count
  N         = p.getTotalNum();
  N_padded  = getAlignedSize<valT>(N);
  NumGroups = p.groups();

  // per-particle accumulated value/gradient/laplacian and scratch rows
  Uat.resize(N);
  dUat.resize(N);
  d2Uat.resize(N);
  cur_u.resize(N);
  cur_du.resize(N);
  cur_d2u.resize(N);
  old_u.resize(N);
  old_du.resize(N);
  old_d2u.resize(N);
  // one functor slot per ordered species pair
  F.resize(NumGroups * NumGroups, nullptr);
  DistCompressed.resize(N);
  DistIndice.resize(N);
}

/** register the pair functor j for species pair (ia,ib); also records it
 * in J2Unique (keyed by the concatenated indices) for later cleanup and
 * parameter management */
template<typename FT>
void J2OrbitalSoA<FT>::addFunc(int ia, int ib, FT* j)
{
  if (ia == ib)
  {
    if (ia == 0) //first time, assign everything
    {
      int ij = 0;
      for (int ig = 0; ig < NumGroups; ++ig)
        for (int jg = 0; jg < NumGroups; ++jg, ++ij)
          if (F[ij] == nullptr)
            F[ij] = j;
    }
    else
      F[ia * NumGroups + ib] = j;
  }
  else
  {
    if (N == 2)
    {
      // a very special case, 1 up + 1 down
      // uu/dd was prevented by the builder
      for (int ig = 0; ig < NumGroups; ++ig)
        for (int jg = 0; jg < NumGroups; ++jg)
          F[ig * NumGroups + jg] = j;
    }
    else
    {
      // generic case: keep the table symmetric
      F[ia * NumGroups + ib] = j;
      F[ib * NumGroups + ia] = j;
    }
  }
  std::stringstream aname;
  aname << ia << ib;
  J2Unique[aname.str()] = j;
}

/** deep-copy this component for target particleset tqp; functors shared
 * between pair slots are cloned once (tracked via fcmap) */
template<typename FT>
WaveFunctionComponentPtr J2OrbitalSoA<FT>::makeClone(ParticleSet& tqp) const
{
  J2OrbitalSoA<FT>* j2copy = new J2OrbitalSoA<FT>(tqp, -1);
  if (dPsi)
    j2copy->dPsi = dPsi->makeClone(tqp);
  std::map<const FT*, FT*> fcmap;
  for (int ig = 0; ig < NumGroups; ++ig)
    for (int jg = ig; jg < NumGroups; ++jg)
    {
      int ij = ig * NumGroups + jg;
      if (F[ij] == 0)
        continue;
      typename std::map<const FT*, FT*>::iterator fit = fcmap.find(F[ij]);
      if (fit == fcmap.end())
      {
        FT* fc = new FT(*F[ij]);
        j2copy->addFunc(ig, jg, fc);
        //if (dPsi) (j2copy->dPsi)->addFunc(aname.str(),ig,jg,fc);
        fcmap[F[ij]] = fc;
      }
    }
  j2copy->Optimizable = Optimizable;
  return j2copy;
}

/** internal function to compute \f$\sum_j u(r_j), du/dr, d2u/dr2\f$
 * @param P particleset
 * @param iat particle index
 * @param dist starting distance
 * @param u starting value
 * @param du starting first deriv
 * @param d2u starting second deriv
 * @param triangle when true, only particles j < iat are accumulated
 */
template<typename FT>
inline void J2OrbitalSoA<FT>::computeU3(const ParticleSet& P,
                                        int iat,
                                        const DistRow& dist,
                                        RealType* restrict u,
                                        RealType* restrict du,
                                        RealType* restrict d2u,
                                        bool triangle)
{
  const int jelmax = triangle ? iat : N;
  constexpr valT czero(0);
  std::fill_n(u, jelmax, czero);
  std::fill_n(du, jelmax, czero);
  std::fill_n(d2u, jelmax, czero);

  // accumulate contributions one species group at a time so the right
  // pair functor is applied to each contiguous particle range
  const int igt = P.GroupID[iat] * NumGroups;
  for (int jg = 0; jg < NumGroups; ++jg)
  {
    const FuncType& f2(*F[igt + jg]);
    int iStart = P.first(jg);
    int iEnd   = std::min(jelmax, P.last(jg));
    f2.evaluateVGL(iat, iStart, iEnd, dist.data(), u, du, d2u,
                   DistCompressed.data(), DistIndice.data());
  }
  //u[iat]=czero;
  //du[iat]=czero;
  //d2u[iat]=czero;
}

/** single-particle move ratio exp(U_old - U_new) using the proposed
 * (temporary) distances; derivatives are deferred to acceptMove */
template<typename FT>
typename J2OrbitalSoA<FT>::PsiValueType J2OrbitalSoA<FT>::ratio(ParticleSet& P, int iat)
{
  //only ratio, ready to compute it again
  UpdateMode = ORB_PBYP_RATIO;
  cur_Uat    = computeU(P, iat, P.getDistTable(my_table_ID_).getTempDists());
  return std::exp(static_cast<PsiValueType>(Uat[iat] - cur_Uat));
}

/** ratios of moving each particle of P to the single proposed position;
 * the self-interaction term at the particle's own slot is removed */
template<typename FT>
inline void J2OrbitalSoA<FT>::evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios)
{
  const auto& d_table = P.getDistTable(my_table_ID_);
  const auto& dist    = d_table.getTempDists();

  for (int ig = 0; ig < NumGroups; ++ig)
  {
    const int igt = ig * NumGroups;
    valT sumU(0);
    for (int jg = 0; jg < NumGroups; ++jg)
    {
      const FuncType& f2(*F[igt + jg]);
      int iStart = P.first(jg);
      int iEnd   = P.last(jg);
      sumU += f2.evaluateV(-1, iStart, iEnd, dist.data(), DistCompressed.data());
    }

    for (int i = P.first(ig); i < P.last(ig); ++i)
    {
      // remove self-interaction
      const valT Uself = F[igt + ig]->evaluate(dist[i]);
      ratios[i]        = std::exp(Uat[i] + Uself - sumU);
    }
  }
}

/** gradient of log(J2) w.r.t. particle iat, from the cached dUat */
template<typename FT>
typename J2OrbitalSoA<FT>::GradType J2OrbitalSoA<FT>::evalGrad(ParticleSet& P, int iat)
{
  return GradType(dUat[iat]);
}

/** move ratio plus gradient at the proposed position; caches cur_u/du/d2u
 * so an accepting move can reuse them */
template<typename FT>
typename J2OrbitalSoA<FT>::PsiValueType J2OrbitalSoA<FT>::ratioGrad(ParticleSet& P, int iat, GradType& grad_iat)
{
  UpdateMode = ORB_PBYP_PARTIAL;

  computeU3(P, iat, P.getDistTable(my_table_ID_).getTempDists(),
            cur_u.data(), cur_du.data(), cur_d2u.data());
  cur_Uat = simd::accumulate_n(cur_u.data(), N, valT());
  DiffVal = Uat[iat] - cur_Uat;
  grad_iat += accumulateG(cur_du.data(), P.getDistTable(my_table_ID_).getTempDispls());
  return std::exp(static_cast<PsiValueType>(DiffVal));
}

/** accept the move of particle iat: update the per-particle sums
 * Uat/dUat/d2Uat of every particle by the difference between the new and
 * old pair contributions */
template<typename FT>
void J2OrbitalSoA<FT>::acceptMove(ParticleSet& P, int iat, bool safe_to_delay)
{
  // get the old u, du, d2u
  const auto& d_table = P.getDistTable(my_table_ID_);
  computeU3(P, iat, d_table.getOldDists(), old_u.data(), old_du.data(), old_d2u.data());
  if (UpdateMode == ORB_PBYP_RATIO)
  { //ratio-only during the move; need to compute derivatives
    const auto& dist = d_table.getTempDists();
    computeU3(P, iat, dist, cur_u.data(), cur_du.data(), cur_d2u.data());
  }

  valT cur_d2Uat(0);
  const auto& new_dr = d_table.getTempDispls();
  const auto& old_dr = d_table.getOldDispls();
  // laplacian of u(r): d2u + (D-1)/r * du; the 1/r is folded into du here
  constexpr valT lapfac = OHMMS_DIM - RealType(1);
#pragma omp simd reduction(+ : cur_d2Uat)
  for (int jat = 0; jat < N; jat++)
  {
    const valT du   = cur_u[jat] - old_u[jat];
    const valT newl = cur_d2u[jat] + lapfac * cur_du[jat];
    const valT dl   = old_d2u[jat] + lapfac * old_du[jat] - newl;
    Uat[jat] += du;
    d2Uat[jat] += dl;
    cur_d2Uat -= newl;
  }
  posT cur_dUat;
  for (int idim = 0; idim < OHMMS_DIM; ++idim)
  {
    const valT* restrict new_dX    = new_dr.data(idim);
    const valT* restrict old_dX    = old_dr.data(idim);
    const valT* restrict cur_du_pt = cur_du.data();
    const valT* restrict old_du_pt = old_du.data();
    valT* restrict save_g          = dUat.data(idim);
    valT cur_g                     = cur_dUat[idim];
#pragma omp simd reduction(+ : cur_g) aligned(old_dX, new_dX, save_g, cur_du_pt, old_du_pt)
    for (int jat = 0; jat < N; jat++)
    {
      const valT newg = cur_du_pt[jat] * new_dX[jat];
      const valT dg   = newg - old_du_pt[jat] * old_dX[jat];
      save_g[jat] -= dg;
      cur_g += newg;
    }
    cur_dUat[idim] = cur_g;
  }
  LogValue += Uat[iat] - cur_Uat;
  Uat[iat]   = cur_Uat;
  dUat(iat)  = cur_dUat;
  d2Uat[iat] = cur_d2Uat;
}

/** rebuild Uat/dUat/d2Uat from scratch, walking the lower triangle of the
 * e-e distance table and scattering each row's contribution back to the
 * earlier particles (upper-triangle part) */
template<typename FT>
void J2OrbitalSoA<FT>::recompute(ParticleSet& P)
{
  const auto& d_table = P.getDistTable(my_table_ID_);
  for (int ig = 0; ig < NumGroups; ++ig)
  {
    for (int iat = P.first(ig), last = P.last(ig); iat < last; ++iat)
    {
      computeU3(P, iat, d_table.getDistRow(iat), cur_u.data(), cur_du.data(), cur_d2u.data(), true);
      Uat[iat] = simd::accumulate_n(cur_u.data(), iat, valT());
      posT grad;
      valT lap(0);
      const valT* restrict u   = cur_u.data();
      const valT* restrict du  = cur_du.data();
      const valT* restrict d2u = cur_d2u.data();
      const auto& displ        = d_table.getDisplRow(iat);
      constexpr valT lapfac    = OHMMS_DIM - RealType(1);
#pragma omp simd reduction(+ : lap) aligned(du, d2u)
      for (int jat = 0; jat < iat; ++jat)
        lap += d2u[jat] + lapfac * du[jat];
      for (int idim = 0; idim < OHMMS_DIM; ++idim)
      {
        const valT* restrict dX = displ.data(idim);
        valT s                  = valT();
#pragma omp simd reduction(+ : s) aligned(du, dX)
        for (int jat = 0; jat < iat; ++jat)
          s += du[jat] * dX[jat];
        grad[idim] = s;
      }
      dUat(iat)  = grad;
      d2Uat[iat] = -lap;
      // add the contribution from the upper triangle
#pragma omp simd aligned(u, du, d2u)
      for (int jat = 0; jat < iat; jat++)
      {
        Uat[jat] += u[jat];
        d2Uat[jat] -= d2u[jat] + lapfac * du[jat];
      }
      for (int idim = 0; idim < OHMMS_DIM; ++idim)
      {
        valT* restrict save_g   = dUat.data(idim);
        const valT* restrict dX = displ.data(idim);
#pragma omp simd aligned(save_g, du, dX)
        for (int jat = 0; jat < iat; jat++)
          save_g[jat] -= du[jat] * dX[jat];
      }
    }
  }
}

/** full evaluation of log(J2) plus accumulation into G and L */
template<typename FT>
typename J2OrbitalSoA<FT>::LogValueType J2OrbitalSoA<FT>::evaluateLog(ParticleSet& P,
                                                                      ParticleSet::ParticleGradient_t& G,
                                                                      ParticleSet::ParticleLaplacian_t& L)
{
  evaluateGL(P, G, L, true);
  return LogValue;
}

/** accumulate gradient/laplacian into G and L; optionally rebuild the
 * internal tables first.  LogValue = -0.5*sum(Uat) since each pair term
 * is counted twice in the per-particle sums */
template<typename FT>
void J2OrbitalSoA<FT>::evaluateGL(ParticleSet& P,
                                  ParticleSet::ParticleGradient_t& G,
                                  ParticleSet::ParticleLaplacian_t& L,
                                  bool fromscratch)
{
  if (fromscratch)
    recompute(P);
  LogValue = valT(0);
  for (int iat = 0; iat < N; ++iat)
  {
    LogValue += Uat[iat];
    G[iat] += dUat[iat];
    L[iat] += d2Uat[iat];
  }
  LogValue = -LogValue * 0.5;
}

/** hessian of log(J2): loops over unique pairs (j < i) and subtracts each
 * pair's symmetric hessian contribution from both particles */
template<typename FT>
void J2OrbitalSoA<FT>::evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi)
{
  LogValue = 0.0;
  const DistanceTableData& d_ee(P.getDistTable(my_table_ID_));
  valT dudr, d2udr2;
  Tensor<valT, DIM> ident;
  grad_grad_psi = 0.0;
  ident.diagonal(1.0);

  for (int i = 1; i < N; ++i)
  {
    const auto& dist  = d_ee.getDistRow(i);
    const auto& displ = d_ee.getDisplRow(i);
    auto ig           = P.GroupID[i];
    const int igt     = ig * NumGroups;
    for (int j = 0; j < i; ++j)
    {
      auto r    = dist[j];
      auto rinv = 1.0 / r;
      auto dr   = displ[j];
      auto jg   = P.GroupID[j];
      auto uij  = F[igt + jg]->evaluate(r, dudr, d2udr2);
      LogValue -= uij;
      // hess = (rr^T/r^2)(u'' - u'/r) + (u'/r) I  (u' already carries 1/r
      // from the functor's evaluate convention — confirm against FT)
      auto hess = rinv * rinv * outerProduct(dr, dr) * (d2udr2 - dudr * rinv) + ident * dudr * rinv;
      grad_grad_psi[i] -= hess;
      grad_grad_psi[j] -= hess;
    }
  }
}

} // namespace qmcplusplus
#endif
DRB104-nowait-barrier-orig-no.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it andor modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http:www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is synchronized with ISOIEC 10646:2017, fifth edition, plus the following additions from Amendment 1 to the fifth edition: - 56 emoji characters - 285 hentaigana - 3 additional Zanabazar Square characters */ /* Copyright (c) 2017, Lawrence Livermore National Security, LLC. 
Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https:github.comLLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* This example is based on one code snippet extracted from a paper: Ma etc. 
Symbolic Analysis of Concurrency Errors in OpenMP Programs, ICPP 2013

   Explicit barrier to counteract nowait */

#include <stdio.h>
#include <assert.h>

/* Two back-to-back "omp parallel for" loops over a[]: the first writes
 * a[idx] = idx, the second reads only the element its own iteration wrote
 * (a[idx] = b + 5*a[idx]).  Each parallel region ends with an implicit
 * barrier, so there is no data race; a[9] ends up 5 + 5*9 = 50 and
 * error == 51 always holds. */
int main()
{
  int len = 1000;
  int a[len];
  int b = 5;
  int idx;
  int error;
  int rc;

  /* initialization pass: a[idx] = idx */
  #pragma cetus private(idx)
  #pragma loop name main#0
  #pragma cetus parallel
  #pragma omp parallel for private(idx)
  for (idx = 0; idx < len; ++idx)
  {
    a[idx] = idx;
  }

  /* update pass: every element becomes b + 5*idx; reads only this
   * iteration's own element, so iterations are independent */
  #pragma cetus private(idx)
  #pragma loop name main#1
  #pragma cetus parallel
  #pragma omp parallel for private(idx)
  for (idx = 0; idx < len; ++idx)
  {
    a[idx] = (b + (a[idx] * 5));
  }

  error = (a[9] + 1);
  /* preprocessor-expanded assert(error == 51); kept verbatim so the
   * reported message, file name, and line number are unchanged */
  (((void)sizeof ((error==51) ? 1 : 0)), ({ if (error==51) { ; } else { __assert_fail("error == 51", "DRB104-nowait-barrier-orig-no.c", 69, __PRETTY_FUNCTION__); } }));
  printf("error = %d\n", error);
  rc = 0;
  return rc;
}
rkb_screen.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

 *
 * Author: Qiming Sun <osirpt.sun@gmail.com>
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <complex.h>
#include <assert.h>
#include "cint.h"
#include "cvhf.h"
#include "optimizer.h"

#define MAX(I,J) ((I) > (J) ? (I) : (J))

/* Sector indices used as offsets into the q_cond/dm_cond buffers:
 * large-large, small-small, small-large, large-small blocks. */
#define LL 0
#define SS 1
#define SL 2
#define LS 3

int int2e_spinor();
int int2e_spsp1spsp2_spinor();
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
                      int *atm, int natm, int *bas, int nbas, double *env);

/* Schwarz-type prescreening for the LLLL integral class.
 * shls holds the four shell indices (i,j,k,l).  Returns nonzero when the
 * quartet must be computed: the integral bound q_cond[ij]*q_cond[kl]
 * exceeds direct_scf_cutoff AND at least one of the six density-matrix
 * blocks it can contract with is large enough to matter. */
int CVHFrkbllll_prescreen(int *shls, CVHFOpt *opt, int *atm, int *bas, double *env)
{
        if (!opt) {
                return 1; // no screen
        }
        int i = shls[0];
        int j = shls[1];
        int k = shls[2];
        int l = shls[3];
        int n = opt->nbas;
        assert(opt->q_cond);
        assert(opt->dm_cond);
        assert(i < n);
        assert(j < n);
        assert(k < n);
        assert(l < n);
        double qijkl = opt->q_cond[i*n+j] * opt->q_cond[k*n+l];
        double dmin = opt->direct_scf_cutoff / qijkl;
        return qijkl > opt->direct_scf_cutoff
            &&((opt->dm_cond[j*n+i] > dmin)    // J contraction with dm(ji)
            || (opt->dm_cond[l*n+k] > dmin)    // J contraction with dm(lk)
            || (opt->dm_cond[j*n+k] > dmin)    // K contractions
            || (opt->dm_cond[j*n+l] > dmin)
            || (opt->dm_cond[i*n+k] > dmin)
            || (opt->dm_cond[i*n+l] > dmin));
}

/* Fill dms_cond with per-density shell-max matrices for the LLLL vk
 * screening and report the smallest density element worth keeping
 * (*dm_atleast) for this quartet.  Always returns 1. */
int CVHFrkbllll_vkscreen(int *shls, CVHFOpt *opt,
                         double **dms_cond, int n_dm, double *dm_atleast,
                         int *atm, int *bas, double *env)
{
        int i = shls[0];
        int j = shls[1];
        int k = shls[2];
        int l = shls[3];
        int nbas = opt->nbas;
        int idm;
        double qijkl = opt->q_cond[i*nbas+j] * opt->q_cond[k*nbas+l];
        // dm_cond layout: overall max matrix first, then one nbas*nbas
        // shell-max matrix per density set
        double *pdmscond = opt->dm_cond + nbas*nbas;
        for (idm = 0; idm < (n_dm+1)/2; idm++) {
                // note in _vhf.rdirect_mapdm, J and K share the same DM
                dms_cond[idm*2+0] = pdmscond + idm*nbas*nbas; // for vj
                dms_cond[idm*2+1] = pdmscond + idm*nbas*nbas; // for vk
        }
        *dm_atleast = opt->direct_scf_cutoff / qijkl;
        return 1;
}

/* Prescreening for the SSLL class: (ij| is a small-component pair (its
 * bound lives in the SS block of q_cond), |kl) is large-component. */
int CVHFrkbssll_prescreen(int *shls, CVHFOpt *opt, int *atm, int *bas, double *env)
{
        if (!opt) {
                return 1; // no screen
        }
        int i = shls[0];
        int j = shls[1];
        int k = shls[2];
        int l = shls[3];
        int n = opt->nbas;
        assert(opt->q_cond);
        assert(opt->dm_cond);
        assert(i < n);
        assert(j < n);
        assert(k < n);
        assert(l < n);
        double *dmsl = opt->dm_cond + n*n*SL;
        double qijkl = opt->q_cond[n*n*SS+i*n+j] * opt->q_cond[k*n+l];
        double dmin = opt->direct_scf_cutoff / qijkl;
        return qijkl > opt->direct_scf_cutoff
            &&((opt->dm_cond[n*n*SS+j*n+i] > dmin)   // dm_ss for J
            || (opt->dm_cond[l*n+k] > dmin)          // dm_ll for J
            || (dmsl[j*n+k] > dmin)                  // dm_sl blocks for K
            || (dmsl[j*n+l] > dmin)
            || (dmsl[i*n+k] > dmin)
            || (dmsl[i*n+l] > dmin));
}

// be careful with the order in dms_cond, the current order (dmll, dmss, dmsl)
// is consistent to the function _call_veff_ssll in dhf.py
int CVHFrkbssll_vkscreen(int *shls, CVHFOpt *opt,
                         double **dms_cond, int n_dm, double *dm_atleast,
                         int *atm, int *bas, double *env)
{
        int i = shls[0];
        int j = shls[1];
        int k = shls[2];
        int l = shls[3];
        int nbas = opt->nbas;
        int idm;
        double qijkl = opt->q_cond[nbas*nbas*SS+i*nbas+j] * opt->q_cond[k*nbas+l];
        // skip the four overall dmcond matrices; per-set shell-max
        // matrices follow, grouped by sector (see CVHFrkbssll_direct_scf_dm)
        double *pdmscond = opt->dm_cond + 4*nbas*nbas;
        int nset = (n_dm+2) / 3;
        double *dmscondll = pdmscond + nset*nbas*nbas*LL;
        double *dmscondss = pdmscond + nset*nbas*nbas*SS;
        double *dmscondsl = pdmscond + nset*nbas*nbas*SL;
        for (idm = 0; idm < nset; idm++) {
                dms_cond[nset*0+idm] = dmscondll + idm*nbas*nbas;
                dms_cond[nset*1+idm] = dmscondss + idm*nbas*nbas;
                dms_cond[nset*2+idm] = dmscondsl + idm*nbas*nbas;
        }
        *dm_atleast = opt->direct_scf_cutoff / qijkl;
        return 1;
}

/* Build the Schwarz bound table qcond[ish*nbas+jsh] =
 * sqrt(max |(ij|ij)|) over the functions of shell pair (ish,jsh),
 * using the given integral driver.  The table is symmetric; only the
 * lower triangle (ij compound index) is computed, in parallel. */
static void set_qcond(int (*intor)(), CINTOpt *cintopt, double *qcond,
                      int *ao_loc, int *atm, int natm, int *bas, int nbas,
                      double *env)
{
        int shls_slice[] = {0, nbas};
        const int cache_size = GTOmax_cache_size(intor, shls_slice, 1,
                                                 atm, natm, bas, nbas, env);
#pragma omp parallel default(none) \
        shared(intor, cintopt, qcond, ao_loc, atm, natm, bas, nbas, env)
{
        double qtmp, tmp;
        int i, j, ij, di, dj, ish, jsh;
        int shls[4];
        double *cache = malloc(sizeof(double) * cache_size);
        // size the integral buffer for the largest shell dimension
        di = 0;
        for (ish = 0; ish < nbas; ish++) {
                dj = ao_loc[ish+1] - ao_loc[ish];
                di = MAX(di, dj);
        }
        double complex *buf = malloc(sizeof(double complex) * di*di*di*di);
#pragma omp for schedule(dynamic, 4)
        for (ij = 0; ij < nbas*(nbas+1)/2; ij++) {
                // invert the triangular compound index: ij = ish*(ish+1)/2 + jsh
                ish = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
                jsh = ij - ish*(ish+1)/2;
                di = ao_loc[ish+1] - ao_loc[ish];
                dj = ao_loc[jsh+1] - ao_loc[jsh];
                shls[0] = ish;
                shls[1] = jsh;
                shls[2] = ish;
                shls[3] = jsh;
                qtmp = 1e-100;  // floor so a screened-out pair never divides by 0
                if (0 != (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
                                  cintopt, cache)) {
                        for (i = 0; i < di; i++) {
                        for (j = 0; j < dj; j++) {
                                // |(ij|ij)| diagonal element of the quartet
                                tmp = cabs(buf[i+di*j+di*dj*i+di*dj*di*j]);
                                qtmp = MAX(qtmp, tmp);
                        } }
                        qtmp = sqrt(qtmp);
                }
                qcond[ish*nbas+jsh] = qtmp;
                qcond[jsh*nbas+ish] = qtmp;
        }
        free(buf);
        free(cache);
}
}

/* Allocate and fill q_cond for the LLLL class (int2e_spinor only). */
void CVHFrkbllll_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
                            int *ao_loc, int *atm, int natm,
                            int *bas, int nbas, double *env)
{
        if (opt->q_cond) {
                free(opt->q_cond);
        }
        opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas);
        assert(intor == &int2e_spinor);
        set_qcond(intor, cintopt, opt->q_cond, ao_loc,
                  atm, natm, bas, nbas, env);
}

/* Allocate and fill q_cond for the SSSS class; the bounds are scaled by
 * (1/(2c))^2, presumably the small-component normalization carried by
 * each sigma.p pair — confirm against the integral convention in libcint. */
void CVHFrkbssss_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
                            int *ao_loc, int *atm, int natm,
                            int *bas, int nbas, double *env)
{
        if (opt->q_cond) {
                free(opt->q_cond);
        }
        opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas);
        assert(intor == &int2e_spsp1spsp2_spinor);
        set_qcond(intor, cintopt, opt->q_cond, ao_loc,
                  atm, natm, bas, nbas, env);
        double c1 = .25/(env[PTR_LIGHT_SPEED]*env[PTR_LIGHT_SPEED]);
        double *qcond = opt->q_cond;
        int i;
        for (i = 0; i < nbas*nbas; i++) {
                qcond[i] *= c1;
        }
}

/* Allocate and fill q_cond for the SSLL class: LL bounds in the first
 * nbas*nbas block, scaled SS bounds in the second. */
void CVHFrkbssll_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
                            int *ao_loc, int *atm, int natm,
                            int *bas, int nbas, double *env)
{
        if (opt->q_cond) {
                free(opt->q_cond);
        }
        opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas*2);
        set_qcond(&int2e_spinor, NULL, opt->q_cond, ao_loc,
                  atm, natm, bas, nbas, env);
        set_qcond(&int2e_spsp1spsp2_spinor, NULL, opt->q_cond+nbas*nbas,
                  ao_loc, atm, natm, bas, nbas, env);
        double c1 = .25/(env[PTR_LIGHT_SPEED]*env[PTR_LIGHT_SPEED]);
        double *qcond = opt->q_cond + nbas*nbas;
        int i;
        for (i = 0; i < nbas*nbas; i++) {
                qcond[i] *= c1;
        }
}

/* For each shell pair, record in dmscond the max |dm| element per
 * density set, and in dmcond the max over all sets.
 * (direct_scf_cutoff, atm, natm and env are currently unused here.) */
static void set_dmcond(double *dmcond, double *dmscond, double complex *dm,
                       double direct_scf_cutoff, int nset, int *ao_loc,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        const int nao = ao_loc[nbas];
        double dmax, dmaxi, tmp;
        int i, j, ish, jsh;
        int iset;
        double complex *pdm;

        for (ish = 0; ish < nbas; ish++) {
        for (jsh = 0; jsh < nbas; jsh++) {
                dmax = 0;
                for (iset = 0; iset < nset; iset++) {
                        dmaxi = 0;
                        pdm = dm + nao*nao*iset;
                        for (i = ao_loc[ish]; i < ao_loc[ish+1]; i++) {
                        for (j = ao_loc[jsh]; j < ao_loc[jsh+1]; j++) {
                                tmp = cabs(pdm[i*nao+j]);
                                dmaxi = MAX(dmaxi, tmp);
                        } }
                        dmscond[iset*nbas*nbas+ish*nbas+jsh] = dmaxi;
                        dmax = MAX(dmax, dmaxi);
                }
                dmcond[ish*nbas+jsh] = dmax;
        } }
}

// dm_cond ~ 1+nset, dm_cond + dms_cond
void CVHFrkbllll_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset,
                               int *ao_loc, int *atm, int natm,
                               int *bas, int nbas, double *env)
{
        if (opt->dm_cond) {
                // NOT reuse opt->dm_cond because nset may be diff in different call
                free(opt->dm_cond);
        }
        opt->dm_cond = (double *)malloc(sizeof(double)*nbas*nbas*(1+nset));
        memset(opt->dm_cond, 0, sizeof(double)*nbas*nbas*(1+nset));
        // dmcond followed by dmscond which are max matrix element for each dm
        set_dmcond(opt->dm_cond, opt->dm_cond+nbas*nbas, dm,
                   opt->direct_scf_cutoff, nset, ao_loc,
                   atm, natm, bas, nbas, env);
}

/* Same layout and filling as CVHFrkbllll_direct_scf_dm, for SSSS. */
void CVHFrkbssss_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset,
                               int *ao_loc, int *atm, int natm,
                               int *bas, int nbas, double *env)
{
        if (opt->dm_cond) {
                free(opt->dm_cond);
        }
        opt->dm_cond = (double *)malloc(sizeof(double)*nbas*nbas*(1+nset));
        memset(opt->dm_cond, 0, sizeof(double)*nbas*nbas*(1+nset));
        set_dmcond(opt->dm_cond, opt->dm_cond+nbas*nbas, dm,
                   opt->direct_scf_cutoff, nset, ao_loc,
                   atm, natm, bas, nbas, env);
}

// the current order of dmscond (dmll, dmss, dmsl) is consistent to the
// function _call_veff_ssll in dhf.py
void CVHFrkbssll_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset,
                               int *ao_loc, int *atm, int natm,
                               int *bas, int nbas, double *env)
{
        if (opt->dm_cond) {
                free(opt->dm_cond);
        }
        if (nset < 3) {
                fprintf(stderr, "At least 3 sets of DMs (dmll,dmss,dmsl) are "
                        "required to set rkb prescreening\n");
                exit(1);
        }
        // dm packs nset densities for each of the three sectors
        nset = nset / 3;
        opt->dm_cond = (double *)malloc(sizeof(double)*nbas*nbas*4*(1+nset));
        memset(opt->dm_cond, 0, sizeof(double)*nbas*nbas*4*(1+nset));
        // 4 types of dmcond (LL,SS,SL,LS) followed by 4 types of dmscond
        int n2c = CINTtot_cgto_spinor(bas, nbas);
        double *dmcondll = opt->dm_cond + nbas*nbas*LL;
        double *dmcondss = opt->dm_cond + nbas*nbas*SS;
        double *dmcondsl = opt->dm_cond + nbas*nbas*SL;
        //double *dmcondls = opt->dm_cond + nbas*nbas*LS;
        double *pdmscond = opt->dm_cond + nbas*nbas*4;
        double *dmscondll = pdmscond + nset*nbas*nbas*LL;
        double *dmscondss = pdmscond + nset*nbas*nbas*SS;
        double *dmscondsl = pdmscond + nset*nbas*nbas*SL;
        //double *dmscondls = dmscond + nset*nbas*nbas*LS;
        double complex *dmll = dm + n2c*n2c*LL*nset;
        double complex *dmss = dm + n2c*n2c*SS*nset;
        double complex *dmsl = dm + n2c*n2c*SL*nset;
        //double complex *dmls = dm + n2c*n2c*LS*nset;
        set_dmcond(dmcondll, dmscondll, dmll,
                   opt->direct_scf_cutoff, nset, ao_loc,
                   atm, natm, bas, nbas, env);
        set_dmcond(dmcondss, dmscondss, dmss,
                   opt->direct_scf_cutoff, nset, ao_loc,
                   atm, natm, bas, nbas, env);
        set_dmcond(dmcondsl, dmscondsl, dmsl,
                   opt->direct_scf_cutoff, nset, ao_loc,
                   atm, natm, bas, nbas, env);
}
fx.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF X X % % F X X % % FFF X % % F X X % % F X X % % % % % % MagickCore Image Special Effects Methods % % % % Software Design % % snibgo (Alan Gibson) % % January 2022 % % % % % % % % Copyright @ 2022 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/fx.h" #include "MagickCore/fx-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" #define MaxTokenLen 100 #define RpnInit 100 #define TableExtend 0.1 #define InitNumOprStack 50 #define MinValStackSize 100 #define 
InitNumUserSymbols 50 typedef long double fxFltType; typedef enum { oAddEq, oSubtractEq, oMultiplyEq, oDivideEq, oPlusPlus, oSubSub, oAdd, oSubtract, oMultiply, oDivide, oModulus, oUnaryPlus, oUnaryMinus, oLshift, oRshift, oEq, oNotEq, oLtEq, oGtEq, oLt, oGt, oLogAnd, oLogOr, oLogNot, oBitAnd, oBitOr, oBitNot, oPow, oQuery, oColon, oOpenParen, oCloseParen, oOpenBracket, oCloseBracket, oOpenBrace, oCloseBrace, oAssign, oNull } OperatorE; typedef struct { OperatorE op; const char * str; int precedence; /* Higher number is higher precedence */ int nArgs; } OperatorT; static const OperatorT Operators[] = { {oAddEq, "+=", 12, 1}, {oSubtractEq, "-=", 12, 1}, {oMultiplyEq, "*=", 13, 1}, {oDivideEq, "/=", 13, 1}, {oPlusPlus, "++", 12, 0}, {oSubSub, "--", 12, 0}, {oAdd, "+", 12, 2}, {oSubtract, "-", 12, 2}, {oMultiply, "*", 13, 2}, {oDivide, "/", 13, 2}, {oModulus, "%", 13, 2}, {oUnaryPlus, "+", 14, 1}, {oUnaryMinus, "-", 14, 1}, {oLshift, "<<", 11, 2}, {oRshift, ">>", 11, 2}, {oEq, "==", 9, 2}, {oNotEq, "!=", 9, 2}, {oLtEq, "<=", 10, 2}, {oGtEq, ">=", 10, 2}, {oLt, "<", 10, 2}, {oGt, ">", 10, 2}, {oLogAnd, "&&", 6, 2}, {oLogOr, "||", 5, 2}, {oLogNot, "!", 16, 1}, {oBitAnd, "&", 8, 2}, {oBitOr, "|", 7, 2}, {oBitNot, "~", 16, 1}, {oPow, "^", 15, 2}, {oQuery, "?", 4, 1}, {oColon, ":", 4, 1}, {oOpenParen, "(", 0, 0}, {oCloseParen, ")", 0, 0}, {oOpenBracket, "[", 0, 0}, {oCloseBracket,"]", 0, 0}, {oOpenBrace, "{", 0, 0}, {oCloseBrace, "}", 0, 0}, {oAssign, "=", 3, 1}, {oNull, "onull", 17, 0} }; typedef enum { cEpsilon, cE, cOpaque, cPhi, cPi, cQuantumRange, cQuantumScale, cTransparent, cMaxRgb, cNull } ConstantE; typedef struct { ConstantE cons; fxFltType val; const char * str; } ConstantT; static const ConstantT Constants[] = { {cEpsilon, MagickEpsilon, "epsilon"}, {cE, 2.7182818284590452354, "e"}, {cOpaque, 1.0, "opaque"}, {cPhi, MagickPHI, "phi"}, {cPi, MagickPI, "pi"}, {cQuantumRange, QuantumRange, "quantumrange"}, {cQuantumScale, QuantumScale, "quantumscale"}, 
{cTransparent, 0.0, "transparent"}, {cMaxRgb, QuantumRange, "MaxRGB"}, {cNull, 0.0, "cnull"} }; #define FirstFunc ((FunctionE) (oNull+1)) typedef enum { fAbs = oNull+1, #if defined(MAGICKCORE_HAVE_ACOSH) fAcosh, #endif fAcos, #if defined(MAGICKCORE_HAVE_J1) fAiry, #endif fAlt, #if defined(MAGICKCORE_HAVE_ASINH) fAsinh, #endif fAsin, #if defined(MAGICKCORE_HAVE_ATANH) fAtanh, #endif fAtan2, fAtan, fCeil, fChannel, fClamp, fCosh, fCos, fDebug, fDrc, #if defined(MAGICKCORE_HAVE_ERF) fErf, #endif fExp, fFloor, fGauss, fGcd, fHypot, fInt, fIsnan, #if defined(MAGICKCORE_HAVE_J0) fJ0, #endif #if defined(MAGICKCORE_HAVE_J1) fJ1, #endif #if defined(MAGICKCORE_HAVE_J1) fJinc, #endif fLn, fLogtwo, fLog, fMax, fMin, fMod, fNot, fPow, fRand, fRound, fSign, fSinc, fSinh, fSin, fSqrt, fSquish, fTanh, fTan, fTrunc, fDo, fFor, fIf, fWhile, fU, fU0, fUP, fS, fV, fP, fSP, fVP, fNull } FunctionE; typedef struct { FunctionE func; const char * str; int nArgs; } FunctionT; static const FunctionT Functions[] = { {fAbs, "abs" , 1}, #if defined(MAGICKCORE_HAVE_ACOSH) {fAcosh, "acosh" , 1}, #endif {fAcos, "acos" , 1}, #if defined(MAGICKCORE_HAVE_J1) {fAiry, "airy" , 1}, #endif {fAlt, "alt" , 1}, #if defined(MAGICKCORE_HAVE_ASINH) {fAsinh, "asinh" , 1}, #endif {fAsin, "asin" , 1}, #if defined(MAGICKCORE_HAVE_ATANH) {fAtanh, "atanh" , 1}, #endif {fAtan2, "atan2" , 2}, {fAtan, "atan" , 1}, {fCeil, "ceil" , 1}, {fChannel, "channel" , 5}, {fClamp, "clamp" , 1}, {fCosh, "cosh" , 1}, {fCos, "cos" , 1}, {fDebug, "debug" , 1}, {fDrc, "drc" , 2}, #if defined(MAGICKCORE_HAVE_ERF) {fErf, "erf" , 1}, #endif {fExp, "exp" , 1}, {fFloor, "floor" , 1}, {fGauss, "gauss" , 2}, {fGcd, "gcd" , 2}, {fHypot, "hypot" , 2}, {fInt, "int" , 1}, {fIsnan, "isnan" , 1}, #if defined(MAGICKCORE_HAVE_J0) {fJ0, "j0" , 1}, #endif #if defined(MAGICKCORE_HAVE_J1) {fJ1, "j1" , 1}, #endif #if defined(MAGICKCORE_HAVE_J1) {fJinc, "jinc" , 1}, #endif {fLn, "ln" , 1}, {fLogtwo, "logtwo", 1}, {fLog, "log" , 1}, {fMax, "max" , 2}, 
{fMin, "min" , 2}, {fMod, "mod" , 2}, {fNot, "not" , 1}, {fPow, "pow" , 2}, {fRand, "rand" , 0}, {fRound, "round" , 1}, {fSign, "sign" , 1}, {fSinc, "sinc" , 1}, {fSinh, "sinh" , 1}, {fSin, "sin" , 1}, {fSqrt, "sqrt" , 1}, {fSquish, "squish", 1}, {fTanh, "tanh" , 1}, {fTan, "tan" , 1}, {fTrunc, "trunc" , 1}, {fDo, "do", 2}, {fFor, "for", 3}, {fIf, "if", 3}, {fWhile, "while", 2}, {fU, "u", 1}, {fU0, "u0", 0}, {fUP, "up", 3}, {fS, "s", 0}, {fV, "v", 0}, {fP, "p", 2}, {fSP, "sp", 2}, {fVP, "vp", 2}, {fNull, "fnull" , 0} }; #define FirstImgAttr ((ImgAttrE) (fNull+1)) typedef enum { aDepth = fNull+1, aExtent, aKurtosis, aMaxima, aMean, aMedian, aMinima, aPage, aPageX, aPageY, aPageWid, aPageHt, aPrintsize, aPrintsizeX, aPrintsizeY, aQuality, aRes, aResX, aResY, aSkewness, aStdDev, aH, aN, aT, aW, aZ, aNull } ImgAttrE; typedef struct { ImgAttrE attr; const char * str; int NeedStats; } ImgAttrT; static const ImgAttrT ImgAttrs[] = { {aDepth, "depth", 1}, {aExtent, "extent", 0}, {aKurtosis, "kurtosis", 1}, {aMaxima, "maxima", 1}, {aMean, "mean", 1}, {aMedian, "median", 1}, {aMinima, "minima", 1}, {aPage, "page", 0}, {aPageX, "page.x", 0}, {aPageY, "page.y", 0}, {aPageWid, "page.width", 0}, {aPageHt, "page.height", 0}, {aPrintsize, "printsize", 0}, {aPrintsizeX, "printsize.x", 0}, {aPrintsizeY, "printsize.y", 0}, {aQuality, "quality", 0}, {aRes, "resolution", 0}, {aResX, "resolution.x", 0}, {aResY, "resolution.y", 0}, {aSkewness, "skewness", 1}, {aStdDev, "standard_deviation", 1}, {aH, "h", 0}, {aN, "n", 0}, {aT, "t", 0}, {aW, "w", 0}, {aZ, "z", 0}, {aNull, "anull", 0} }; #define FirstSym ((SymbolE) (aNull+1)) typedef enum { sHue = aNull+1, sIntensity, sLightness, sLuma, sLuminance, sSaturation, sA, sB, sC, sG, sI, sJ, sK, sM, sO, sR, sY, sNull } SymbolE; typedef struct { SymbolE sym; const char * str; } SymbolT; static const SymbolT Symbols[] = { {sHue, "hue"}, {sIntensity, "intensity"}, {sLightness, "lightness"}, {sLuma, "luma"}, {sLuminance, "luminance"}, {sSaturation, 
"saturation"}, {sA, "a"}, {sB, "b"}, {sC, "c"}, {sG, "g"}, {sI, "i"}, {sJ, "j"}, {sK, "k"}, {sM, "m"}, {sO, "o"}, {sR, "r"}, {sY, "y"}, {sNull, "snull"} }; /* There is no way to access new value of pixels. This might be a future enhancement, eg "q". fP, oU and oV can have channel qualifier such as "u.r". For meta channels, we might also allow numbered channels eg "u.2" or "u.16". ... or have extra argument to p[]. */ #define FirstCont (sNull+1) /* Run-time controls are in the RPN, not explicitly in the input string. */ typedef enum { rGoto = FirstCont, rIfZeroGoto, rIfNotZeroGoto, rCopyFrom, rCopyTo, rZerStk, rNull } ControlE; typedef struct { ControlE cont; const char * str; int nArgs; } ControlT; static const ControlT Controls[] = { {rGoto, "goto", 0}, {rIfZeroGoto, "ifzerogoto", 1}, {rIfNotZeroGoto, "ifnotzerogoto", 1}, {rCopyFrom, "copyfrom", 0}, {rCopyTo, "copyto", 1}, {rZerStk, "zerstk", 0}, {rNull, "rnull", 0} }; #define NULL_ADDRESS -2 typedef struct { int addrQuery; int addrColon; } TernaryT; typedef struct { const char * str; PixelChannel pixChan; } ChannelT; #define NO_CHAN_QUAL ((PixelChannel) (-1)) #define THIS_CHANNEL ((PixelChannel) (-2)) #define HUE_CHANNEL ((PixelChannel) (-3)) #define SAT_CHANNEL ((PixelChannel) (-4)) #define LIGHT_CHANNEL ((PixelChannel) (-5)) #define INTENSITY_CHANNEL ((PixelChannel) (-6)) static const ChannelT Channels[] = { {"r", RedPixelChannel}, {"g", GreenPixelChannel}, {"b", BluePixelChannel}, {"c", CyanPixelChannel}, {"m", MagentaPixelChannel}, {"y", YellowPixelChannel}, {"k", BlackPixelChannel}, {"a", AlphaPixelChannel}, {"o", AlphaPixelChannel}, {"hue", HUE_CHANNEL}, {"saturation", SAT_CHANNEL}, {"lightness", LIGHT_CHANNEL}, {"intensity", INTENSITY_CHANNEL}, {"all", CompositePixelChannel}, {"this", THIS_CHANNEL}, {"", NO_CHAN_QUAL} }; /* The index into UserSymbols is also the index into run-time UserSymVals. 
*/ typedef struct { char * pex; size_t len; } UserSymbolT; typedef enum { etOperator, etConstant, etFunction, etImgAttr, etSymbol, etColourConstant, etControl } ElementTypeE; static const char * sElementTypes[] = { "Operator", "Constant", "Function", "ImgAttr", "Symbol", "ColConst", "Control" }; typedef struct { ElementTypeE type; fxFltType val, val1, val2; int oprNum; int nArgs; MagickBooleanType IsRelative; MagickBooleanType DoPush; int EleNdx; int nDest; /* Number of Elements that "goto" this element */ PixelChannel ChannelQual; ImgAttrE ImgAttrQual; char * pExpStart; int lenExp; } ElementT; typedef enum { rtUnknown, rtEntireImage, rtCornerOnly } RunTypeE; typedef struct { CacheView *View; /* Other per-image metadata could go here. */ } ImgT; typedef struct { RandomInfo * magick_restrict random_info; int numValStack; int usedValStack; fxFltType * ValStack; fxFltType * UserSymVals; Quantum * thisPixel; } fxRtT; struct _FxInfo { Image * image; size_t ImgListLen; ssize_t ImgNum; MagickBooleanType NeedStats; MagickBooleanType GotStats; MagickBooleanType NeedHsl; MagickBooleanType DebugOpt; /* Whether "-debug" option is in effect */ MagickBooleanType ContainsDebug; /* Whether expression contains "debug ()" function */ char * expression; char * pex; char ShortExp[MagickPathExtent]; /* for reporting */ int teDepth; char token[MagickPathExtent]; size_t lenToken; int numElements; int usedElements; ElementT * Elements; /* Elements is read-only at runtime. */ int numUserSymbols; int usedUserSymbols; UserSymbolT * UserSymbols; int numOprStack; int usedOprStack; int maxUsedOprStack; OperatorE * OperatorStack; ChannelStatistics ** statistics; int precision; RunTypeE runType; RandomInfo **magick_restrict random_infos; ImgT * Imgs; Image ** Images; ExceptionInfo * exception; fxRtT * fxrts; }; /* Forward declarations for recursion. 
*/ static MagickBooleanType TranslateStatementList (FxInfo * pfx, const char * strLimit, char * chLimit); static MagickBooleanType TranslateExpression (FxInfo * pfx, const char * strLimit, char * chLimit, MagickBooleanType * needPopAll); static MagickBooleanType GetFunction (FxInfo * pfx, FunctionE fe); static MagickBooleanType InitFx (FxInfo * pfx, const Image * img, MagickBooleanType CalcAllStats, ExceptionInfo *exception) { ssize_t i=0; const Image * next; pfx->ImgListLen = GetImageListLength (img); pfx->ImgNum = GetImageIndexInList (img); pfx->image = (Image *)img; pfx->NeedStats = MagickFalse; pfx->GotStats = MagickFalse; pfx->NeedHsl = MagickFalse; pfx->DebugOpt = IsStringTrue (GetImageArtifact (img, "fx:debug")); pfx->statistics = NULL; pfx->Imgs = NULL; pfx->Images = NULL; pfx->exception = exception; pfx->precision = GetMagickPrecision (); pfx->random_infos = AcquireRandomInfoTLS (); pfx->ContainsDebug = MagickFalse; pfx->runType = (CalcAllStats) ? rtEntireImage : rtCornerOnly; pfx->Imgs = (ImgT *)AcquireQuantumMemory (pfx->ImgListLen, sizeof (ImgT)); if (!pfx->Imgs) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), ResourceLimitFatalError, "Imgs", "%lu", pfx->ImgListLen); return MagickFalse; } next = GetFirstImageInList (img); for ( ; next != (Image *) NULL; next=next->next) { ImgT * pimg = &pfx->Imgs[i]; pimg->View = AcquireVirtualCacheView (next, pfx->exception); if (!pimg->View) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), ResourceLimitFatalError, "View", "[%li]", i); /* dealloc any done so far, and Imgs */ for ( ; i > 0; i--) { pimg = &pfx->Imgs[i-1]; pimg->View = DestroyCacheView (pimg->View); } pfx->Imgs=(ImgT *) RelinquishMagickMemory (pfx->Imgs); return MagickFalse; } i++; } pfx->Images = ImageListToArray (img, pfx->exception); return MagickTrue; } static MagickBooleanType DeInitFx (FxInfo * pfx) { ssize_t i; if (pfx->Images) pfx->Images = (Image**) RelinquishMagickMemory (pfx->Images); if (pfx->Imgs) { for (i 
= (ssize_t)GetImageListLength(pfx->image); i > 0; i--) { ImgT * pimg = &pfx->Imgs[i-1]; pimg->View = DestroyCacheView (pimg->View); } pfx->Imgs=(ImgT *) RelinquishMagickMemory (pfx->Imgs); } pfx->random_infos = DestroyRandomInfoTLS (pfx->random_infos); if (pfx->statistics) { for (i = (ssize_t)GetImageListLength(pfx->image); i > 0; i--) { pfx->statistics[i-1]=(ChannelStatistics *) RelinquishMagickMemory (pfx->statistics[i-1]); } pfx->statistics = (ChannelStatistics**) RelinquishMagickMemory(pfx->statistics); } return MagickTrue; } static ElementTypeE TypeOfOpr (int op) { if (op < oNull) return etOperator; if (op == oNull) return etConstant; if (op <= fNull) return etFunction; if (op <= aNull) return etImgAttr; if (op <= sNull) return etSymbol; if (op <= rNull) return etControl; return (ElementTypeE) 0; } static char * SetPtrShortExp (FxInfo * pfx, char * pExp, size_t len) { #define MaxLen 20 size_t slen; char * p; *pfx->ShortExp = '\0'; if (pExp && len) { slen = CopyMagickString (pfx->ShortExp, pExp, len); if (slen > MaxLen) { (void) CopyMagickString (pfx->ShortExp+MaxLen, "...", 4); } p = strchr (pfx->ShortExp, '\n'); if (p) (void) CopyMagickString (p, "...", 4); p = strchr (pfx->ShortExp, '\r'); if (p) (void) CopyMagickString (p, "...", 4); } return pfx->ShortExp; } static char * SetShortExp (FxInfo * pfx) { return SetPtrShortExp (pfx, pfx->pex, MaxTokenLen-1); } static int FindUserSymbol (FxInfo * pfx, char * name) /* returns index into pfx->UserSymbols, and thus into pfxrt->UserSymVals, or NULL_ADDRESS if not found. 
*/ { int i; size_t lenName; lenName = strlen (name); for (i=0; i < pfx->usedUserSymbols; i++) { UserSymbolT *pus = &pfx->UserSymbols[i]; if (lenName == pus->len && LocaleNCompare (name, pus->pex, lenName)==0) break; } if (i == pfx->usedUserSymbols) return NULL_ADDRESS; return i; } static MagickBooleanType ExtendUserSymbols (FxInfo * pfx) { pfx->numUserSymbols = (int) ceil (pfx->numUserSymbols * (1 + TableExtend)); pfx->UserSymbols = (UserSymbolT*) ResizeMagickMemory (pfx->UserSymbols, pfx->numUserSymbols * sizeof(UserSymbolT)); if (!pfx->UserSymbols) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), ResourceLimitFatalError, "UserSymbols", "%i", pfx->numUserSymbols); return MagickFalse; } return MagickTrue; } static int AddUserSymbol (FxInfo * pfx, char * pex, size_t len) { UserSymbolT *pus; if (++pfx->usedUserSymbols >= pfx->numUserSymbols) { if (!ExtendUserSymbols (pfx)) return -1; } pus = &pfx->UserSymbols[pfx->usedUserSymbols-1]; pus->pex = pex; pus->len = len; return pfx->usedUserSymbols-1; } static void DumpTables (FILE * fh) { int i; for (i=0; i <= rNull; i++) { const char * str = ""; if ( i < oNull) str = Operators[i].str; if (i >= FirstFunc && i < fNull) str = Functions[i-FirstFunc].str; if (i >= FirstImgAttr && i < aNull) str = ImgAttrs[i-FirstImgAttr].str; if (i >= FirstSym && i < sNull) str = Symbols[i-FirstSym].str; if (i >= FirstCont && i < rNull) str = Controls[i-FirstCont].str; if (i==0 ) fprintf (stderr, "Operators:\n "); else if (i==oNull) fprintf (stderr, "\nFunctions:\n "); else if (i==fNull) fprintf (stderr, "\nImage attributes:\n "); else if (i==aNull) fprintf (stderr, "\nSymbols:\n "); else if (i==sNull) fprintf (stderr, "\nControls:\n "); fprintf (fh, " %s", str); } fprintf (fh, "\n"); } static char * NameOfUserSym (FxInfo * pfx, int ndx, char * buf) { UserSymbolT * pus; assert (ndx >= 0 && ndx < pfx->usedUserSymbols); pus = &pfx->UserSymbols[ndx]; (void) CopyMagickString (buf, pus->pex, pus->len+1); return buf; } static void 
DumpUserSymbols (FxInfo * pfx, FILE * fh) { char UserSym[MagickPathExtent]; int i; fprintf (fh, "UserSymbols (%i)\n", pfx->usedUserSymbols); for (i=0; i < pfx->usedUserSymbols; i++) { fprintf (fh, " %i: '%s'\n", i, NameOfUserSym (pfx, i, UserSym)); } } static MagickBooleanType BuildRPN (FxInfo * pfx) { pfx->numUserSymbols = InitNumUserSymbols; pfx->usedUserSymbols = 0; pfx->UserSymbols = (UserSymbolT*) AcquireMagickMemory (pfx->numUserSymbols * sizeof(UserSymbolT)); if (!pfx->UserSymbols) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), ResourceLimitFatalError, "UserSymbols", "%i", pfx->numUserSymbols); return MagickFalse; } pfx->numElements = RpnInit; pfx->usedElements = 0; pfx->Elements = NULL; pfx->Elements = (ElementT*) AcquireMagickMemory (pfx->numElements * sizeof(ElementT)); if (!pfx->Elements) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), ResourceLimitFatalError, "Elements", "%i", pfx->numElements); return MagickFalse; } pfx->usedOprStack = 0; pfx->maxUsedOprStack = 0; pfx->numOprStack = InitNumOprStack; pfx->OperatorStack = (OperatorE*) AcquireMagickMemory (pfx->numOprStack * sizeof(OperatorE)); if (!pfx->OperatorStack) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), ResourceLimitFatalError, "OperatorStack", "%i", pfx->numOprStack); return MagickFalse; } return MagickTrue; } static MagickBooleanType AllocFxRt (FxInfo * pfx, fxRtT * pfxrt) { int nRnd; int i; pfxrt->random_info = AcquireRandomInfo (); pfxrt->thisPixel = NULL; nRnd = 20 + 10 * (int) GetPseudoRandomValue (pfxrt->random_info); for (i=0; i < nRnd; i++) (void) GetPseudoRandomValue (pfxrt->random_info);; pfxrt->usedValStack = 0; pfxrt->numValStack = 2 * pfx->maxUsedOprStack; if (pfxrt->numValStack < MinValStackSize) pfxrt->numValStack = MinValStackSize; pfxrt->ValStack = (fxFltType*) AcquireMagickMemory (pfxrt->numValStack * sizeof(fxFltType)); if (!pfxrt->ValStack) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), 
ResourceLimitFatalError, "ValStack", "%i", pfxrt->numValStack); return MagickFalse; } pfxrt->UserSymVals = NULL; if (pfx->usedUserSymbols) { pfxrt->UserSymVals = (fxFltType*) AcquireMagickMemory (pfx->usedUserSymbols * sizeof(fxFltType)); if (!pfxrt->UserSymVals) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), ResourceLimitFatalError, "UserSymVals", "%i", pfx->usedUserSymbols); return MagickFalse; } for (i = 0; i < pfx->usedUserSymbols; i++) pfxrt->UserSymVals[i] = (fxFltType) 0; } return MagickTrue; } static MagickBooleanType ExtendRPN (FxInfo * pfx) { pfx->numElements = (int) ceil (pfx->numElements * (1 + TableExtend)); pfx->Elements = (ElementT*) ResizeMagickMemory (pfx->Elements, pfx->numElements * sizeof(ElementT)); if (!pfx->Elements) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), ResourceLimitFatalError, "Elements", "%i", pfx->numElements); return MagickFalse; } return MagickTrue; } static MagickBooleanType inline OprInPlace (int op) { return (op >= oAddEq && op <= oSubSub ? MagickTrue : MagickFalse); } static const char * OprStr (int oprNum) { const char * str; if (oprNum < 0) str = "bad OprStr"; else if (oprNum <= oNull) str = Operators[oprNum].str; else if (oprNum <= fNull) str = Functions[oprNum-FirstFunc].str; else if (oprNum <= aNull) str = ImgAttrs[oprNum-FirstImgAttr].str; else if (oprNum <= sNull) str = Symbols[oprNum-FirstSym].str; else if (oprNum <= rNull) str = Controls[oprNum-FirstCont].str; else { str = "bad OprStr"; } return str; } static MagickBooleanType DumpRPN (FxInfo * pfx, FILE * fh) { int i; fprintf (fh, "DumpRPN:"); fprintf (fh, " numElements=%i", pfx->numElements); fprintf (fh, " usedElements=%i", pfx->usedElements); fprintf (fh, " maxUsedOprStack=%i", pfx->maxUsedOprStack); fprintf (fh, " ImgListLen=%g", (double) pfx->ImgListLen); fprintf (fh, " NeedStats=%s", pfx->NeedStats ? "yes" : "no"); fprintf (fh, " GotStats=%s", pfx->GotStats ? "yes" : "no"); fprintf (fh, " NeedHsl=%s\n", pfx->NeedHsl ? 
"yes" : "no"); if (pfx->runType==rtEntireImage) fprintf (stderr, "EntireImage"); else if (pfx->runType==rtCornerOnly) fprintf (stderr, "CornerOnly"); fprintf (fh, "\n"); for (i=0; i < pfx->usedElements; i++) { ElementT * pel = &pfx->Elements[i]; pel->nDest = 0; } for (i=0; i < pfx->usedElements; i++) { ElementT * pel = &pfx->Elements[i]; if (pel->oprNum == rGoto || pel->oprNum == rIfZeroGoto || pel->oprNum == rIfNotZeroGoto) { if (pel->EleNdx >= 0 && pel->EleNdx < pfx->numElements) { ElementT * pelDest = &pfx->Elements[pel->EleNdx]; pelDest->nDest++; } } } for (i=0; i < pfx->usedElements; i++) { char UserSym[MagickPathExtent]; ElementT * pel = &pfx->Elements[i]; const char * str = OprStr (pel->oprNum); const char *sRelAbs = ""; if (pel->oprNum == fP || pel->oprNum == fUP || pel->oprNum == fVP || pel->oprNum == fSP) sRelAbs = pel->IsRelative ? "[]" : "{}"; if (pel->type == etColourConstant) fprintf (fh, " %i: %s vals=%.*Lg,%.*Lg,%.*Lg '%s%s' nArgs=%i ndx=%i %s", i, sElementTypes[pel->type], pfx->precision, pel->val, pfx->precision, pel->val1, pfx->precision, pel->val2, str, sRelAbs, pel->nArgs, pel->EleNdx, pel->DoPush ? "push" : "NO push"); else fprintf (fh, " %i: %s val=%.*Lg '%s%s' nArgs=%i ndx=%i %s", i, sElementTypes[pel->type], pfx->precision, pel->val, str, sRelAbs, pel->nArgs, pel->EleNdx, pel->DoPush ? 
"push" : "NO push"); if (pel->ImgAttrQual != aNull) fprintf (fh, " ia=%s", OprStr(pel->ImgAttrQual)); if (pel->ChannelQual != NO_CHAN_QUAL) { if (pel->ChannelQual == THIS_CHANNEL) fprintf (stderr, " ch=this"); else fprintf (stderr, " ch=%i", pel->ChannelQual); } if (pel->oprNum == rCopyTo) { fprintf (fh, " CopyTo ==> %s", NameOfUserSym (pfx, pel->EleNdx, UserSym)); } else if (pel->oprNum == rCopyFrom) { fprintf (fh, " CopyFrom <== %s", NameOfUserSym (pfx, pel->EleNdx, UserSym)); } else if (OprInPlace (pel->oprNum)) { fprintf (fh, " <==> %s", NameOfUserSym (pfx, pel->EleNdx, UserSym)); } if (pel->nDest > 0) fprintf (fh, " <==dest(%i)", pel->nDest); fprintf (fh, "\n"); } return MagickTrue; } static void DestroyRPN (FxInfo * pfx) { pfx->numOprStack = 0; pfx->usedOprStack = 0; if (pfx->OperatorStack) pfx->OperatorStack = (OperatorE*) RelinquishMagickMemory (pfx->OperatorStack); pfx->numElements = 0; pfx->usedElements = 0; if (pfx->Elements) pfx->Elements = (ElementT*) RelinquishMagickMemory (pfx->Elements); pfx->usedUserSymbols = 0; if (pfx->UserSymbols) pfx->UserSymbols = (UserSymbolT*) RelinquishMagickMemory (pfx->UserSymbols); } static void DestroyFxRt (fxRtT * pfxrt) { pfxrt->usedValStack = 0; if (pfxrt->ValStack) pfxrt->ValStack = (fxFltType*) RelinquishMagickMemory (pfxrt->ValStack); if (pfxrt->UserSymVals) pfxrt->UserSymVals = (fxFltType*) RelinquishMagickMemory (pfxrt->UserSymVals); pfxrt->random_info = DestroyRandomInfo (pfxrt->random_info); } static size_t GetToken (FxInfo * pfx) /* Returns length of token that starts with an alpha, or 0 if it isn't a token that starts with an alpha. j0 and j1 have trailing digit. Also colours like "gray47" have more trailing digits. After intial alpha(s) also allow single "_", eg "standard_deviation". Does not advance pfx->pex. This splits "mean.r" etc. 
*/ { char * p = pfx->pex; size_t len = 0; *pfx->token = '\0'; pfx->lenToken = 0; if (!isalpha((int)*p)) return 0; /* Regard strings that start "icc-" or "device-", followed by any number of alphas, as a token. */ if (LocaleNCompare (p, "icc-", 4) == 0) { len = 4; p += 4; while (isalpha ((int)*p)) { len++; p++; } } else if (LocaleNCompare (p, "device-", 7) == 0) { len = 7; p += 7; while (isalpha ((int)*p)) { len++; p++; } } else { while (isalpha ((int)*p)) { len++; p++; } if (*p == '_') { len++; p++; } while (isalpha ((int)*p)) { len++; p++; } while (isdigit ((int)*p)) { len++; p++; } } if (len >= MaxTokenLen) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "GetToken: too long", "%g at '%s'", (double) len, SetShortExp(pfx)); len = MaxTokenLen; } if (len) { (void) CopyMagickString (pfx->token, pfx->pex, (len+1<MaxTokenLen)?len+1:MaxTokenLen); } pfx->lenToken = strlen (pfx->token); return len; } static MagickBooleanType TokenMaybeUserSymbol (FxInfo * pfx) { char * p = pfx->token; int i = 0; while (*p) { if (!isalpha ((int)*p++)) return MagickFalse; i++; } if (i < 2) return MagickFalse; return MagickTrue; } static MagickBooleanType AddElement (FxInfo * pfx, fxFltType val, int oprNum) { ElementT * pel; assert (oprNum <= rNull); if (++pfx->usedElements >= pfx->numElements) { if (!ExtendRPN (pfx)) return MagickFalse; } pel = &pfx->Elements[pfx->usedElements-1]; pel->type = TypeOfOpr (oprNum); pel->val = val; pel->val1 = (fxFltType) 0; pel->val2 = (fxFltType) 0; pel->oprNum = oprNum; pel->DoPush = MagickTrue; pel->EleNdx = 0; pel->ChannelQual = NO_CHAN_QUAL; pel->ImgAttrQual = aNull; pel->nDest = 0; pel->pExpStart = NULL; pel->lenExp = 0; if (oprNum <= oNull) pel->nArgs = Operators[oprNum].nArgs; else if (oprNum <= fNull) pel->nArgs = Functions[oprNum-FirstFunc].nArgs; else if (oprNum <= aNull) pel->nArgs = 0; else if (oprNum <= sNull) pel->nArgs = 0; else pel->nArgs = Controls[oprNum-FirstCont].nArgs; return MagickTrue; } static 
MagickBooleanType AddAddressingElement (FxInfo * pfx, int oprNum, int EleNdx) { ElementT * pel; if (!AddElement (pfx, (fxFltType) 0, oprNum)) return MagickFalse; pel = &pfx->Elements[pfx->usedElements-1]; pel->EleNdx = EleNdx; if (oprNum == rGoto || oprNum == rIfZeroGoto || oprNum == rIfNotZeroGoto || oprNum == rZerStk) { pel->DoPush = MagickFalse; } /* Note: for() may or may not need pushing, depending on whether the value is needed, eg "for(...)+2" or debug(for(...)). */ return MagickTrue; } static MagickBooleanType AddColourElement (FxInfo * pfx, fxFltType val0, fxFltType val1, fxFltType val2) { ElementT * pel; if (!AddElement (pfx, val0, oNull)) return MagickFalse; pel = &pfx->Elements[pfx->usedElements-1]; pel->val1 = val1; pel->val2 = val2; pel->type = etColourConstant; return MagickTrue; } static void inline SkipSpaces (FxInfo * pfx) { while (isspace ((int)*pfx->pex)) pfx->pex++; } static char inline PeekChar (FxInfo * pfx) { SkipSpaces (pfx); return *pfx->pex; } static MagickBooleanType inline PeekStr (FxInfo * pfx, const char * str) { SkipSpaces (pfx); return (LocaleNCompare (pfx->pex, str, strlen(str))==0 ? MagickTrue : MagickFalse); } static MagickBooleanType ExpectChar (FxInfo * pfx, char c) { if (PeekChar (pfx) != c) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Expected char", "'%c' at '%s'", c, SetShortExp (pfx)); return MagickFalse; } pfx->pex++; return MagickTrue; } static int MaybeXYWH (FxInfo * pfx, ImgAttrE * pop) /* If ".x" or ".y" or ".width" or ".height" increments *pop and returns 1 to 4 . Otherwise returns 0. 
*/ { int ret=0; if (*pop != aPage && *pop != aPrintsize && *pop != aRes) return 0; if (PeekChar (pfx) != '.') return 0; if (!ExpectChar (pfx, '.')) return 0; (void) GetToken (pfx); if (LocaleCompare ("x", pfx->token)==0) ret=1; else if (LocaleCompare ("y", pfx->token)==0) ret=2; else if (LocaleCompare ("width", pfx->token)==0) ret=3; else if (LocaleCompare ("height", pfx->token)==0) ret=4; if (!ret) (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Invalid 'x' or 'y' or 'width' or 'height' token=", "'%s' at '%s'", pfx->token, SetShortExp(pfx)); if (*pop == aPage) (*pop) = (ImgAttrE) (*pop + ret); else { if (ret > 2) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Invalid 'width' or 'height' token=", "'%s' at '%s'", pfx->token, SetShortExp(pfx)); } else { (*pop) = (ImgAttrE) (*pop + ret); } } pfx->pex+=pfx->lenToken; return ret; } static MagickBooleanType ExtendOperatorStack (FxInfo * pfx) { pfx->numOprStack = (int) ceil (pfx->numOprStack * (1 + TableExtend)); pfx->OperatorStack = (OperatorE*) ResizeMagickMemory (pfx->OperatorStack, pfx->numOprStack * sizeof(OperatorE)); if (!pfx->OperatorStack) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), ResourceLimitFatalError, "OprStack", "%i", pfx->numOprStack); return MagickFalse; } return MagickTrue; } static MagickBooleanType PushOperatorStack (FxInfo * pfx, int op) { if (++pfx->usedOprStack >= pfx->numOprStack) { if (!ExtendOperatorStack (pfx)) return MagickFalse; } pfx->OperatorStack[pfx->usedOprStack-1] = (OperatorE) op; if (pfx->maxUsedOprStack < pfx->usedOprStack) pfx->maxUsedOprStack = pfx->usedOprStack; return MagickTrue; } static OperatorE GetLeadingOp (FxInfo * pfx) { OperatorE op = oNull; if (*pfx->pex == '-') op = oUnaryMinus; else if (*pfx->pex == '+') op = oUnaryPlus; else if (*pfx->pex == '~') op = oBitNot; else if (*pfx->pex == '!') op = oLogNot; else if (*pfx->pex == '(') op = oOpenParen; return op; } static MagickBooleanType 
inline OprIsUnaryPrefix (OperatorE op) { return (op == oUnaryMinus || op == oUnaryPlus || op == oBitNot || op == oLogNot ? MagickTrue : MagickFalse); } static MagickBooleanType TopOprIsUnaryPrefix (FxInfo * pfx) { if (!pfx->usedOprStack) return MagickFalse; return OprIsUnaryPrefix (pfx->OperatorStack[pfx->usedOprStack-1]); } static MagickBooleanType PopOprOpenParen (FxInfo * pfx, OperatorE op) { if (!pfx->usedOprStack) return MagickFalse; if (pfx->OperatorStack[pfx->usedOprStack-1] != op) return MagickFalse; pfx->usedOprStack--; return MagickTrue; } static int GetCoordQualifier (FxInfo * pfx, int op) /* Returns -1 if invalid CoordQualifier, +1 if valid and appropriate. */ { if (op != fU && op != fV && op != fS) return -1; (void) GetToken (pfx); if (pfx->lenToken != 1) { return -1; } if (*pfx->token != 'p' && *pfx->token != 'P') return -1; if (!GetFunction (pfx, fP)) return -1; return 1; } static PixelChannel GetChannelQualifier (FxInfo * pfx, int op) { if (op == fU || op == fV || op == fP || op == fUP || op == fVP || op == fS || (op >= FirstImgAttr && op <= aNull) ) { const ChannelT * pch = &Channels[0]; (void) GetToken (pfx); while (*pch->str) { if (LocaleCompare (pch->str, pfx->token)==0) { if (op >= FirstImgAttr && op <= (OperatorE)aNull && (pch->pixChan == HUE_CHANNEL || pch->pixChan == SAT_CHANNEL || pch->pixChan == LIGHT_CHANNEL) ) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Can't have image attribute with HLS qualifier at", "'%s'", SetShortExp(pfx)); return NO_CHAN_QUAL; } pfx->pex += pfx->lenToken; return pch->pixChan; } pch++; } } return NO_CHAN_QUAL; } static ImgAttrE GetImgAttrToken (FxInfo * pfx) { ImgAttrE ia = aNull; const char * iaStr; for (ia = FirstImgAttr; ia < aNull; ia=(ImgAttrE) (ia+1)) { iaStr = ImgAttrs[ia-FirstImgAttr].str; if (LocaleCompare (iaStr, pfx->token)==0) { pfx->pex += strlen(pfx->token); if (ImgAttrs[ia-FirstImgAttr].NeedStats == 1) pfx->NeedStats = MagickTrue; MaybeXYWH (pfx, &ia); break; } } 
if (ia == aPage || ia == aPrintsize || ia == aRes) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Attribute", "'%s' needs qualifier at '%s'", iaStr, SetShortExp(pfx)); } return ia; } static ImgAttrE GetImgAttrQualifier (FxInfo * pfx, int op) { ImgAttrE ia = aNull; if (op == (OperatorE)fU || op == (OperatorE)fV || op == (OperatorE)fP || op == (OperatorE)fS) { (void) GetToken (pfx); if (pfx->lenToken == 0) { return aNull; } ia = GetImgAttrToken (pfx); } return ia; } static MagickBooleanType IsQualifier (FxInfo * pfx) { if (PeekChar (pfx) == '.') { pfx->pex++; return MagickTrue; } return MagickFalse; } static ssize_t GetProperty (FxInfo * pfx, fxFltType *val) /* returns number of character to swallow. "-1" means invalid input "0" means no relevant input (don't swallow, but not an error) */ { if (PeekStr (pfx, "%[")) { int level = 0; size_t len; char sProperty [MagickPathExtent]; char * p = pfx->pex + 2; while (*p) { if (*p == '[') level++; else if (*p == ']') { if (level == 0) break; level--; } p++; } if (!*p || level != 0) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "After '%[' expected ']' at", "'%s'", SetShortExp(pfx)); return -1; } len = (size_t) (p - pfx->pex + 1); if (len > MaxTokenLen) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Too much text between '%[' and ']' at", "'%s'", SetShortExp(pfx)); return -1; } (void) CopyMagickString (sProperty, pfx->pex, len+1); sProperty[len] = '\0'; { char * tailptr; char * text; text = InterpretImageProperties (pfx->image->image_info, pfx->image, sProperty, pfx->exception); if (!text || !*text) { text = DestroyString(text); (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Unknown property", "'%s' at '%s'", sProperty, SetShortExp(pfx)); return -1; } *val = strtold (text, &tailptr); if (text == tailptr) { text = DestroyString(text); (void) ThrowMagickException ( pfx->exception, 
GetMagickModule(), OptionError, "Property", "'%s' text '%s' is not a number at '%s'", sProperty, text, SetShortExp(pfx)); return -1; } text = DestroyString(text); } return ((ssize_t) len); } return 0; } static ssize_t inline GetConstantColour (FxInfo * pfx, fxFltType *v0, fxFltType *v1, fxFltType *v2) /* Finds named colour such as "blue" and colorspace function such as "lab(10,20,30)". Returns number of characters to swallow. Return -1 means apparantly a constant colour, but with an error. Return 0 means not a constant colour, but not an error. */ { PixelInfo colour; ExceptionInfo *dummy_exception = AcquireExceptionInfo (); char *p; MagickBooleanType IsGray, IsIcc, IsDev; char ColSp[MagickPathExtent]; (void) CopyMagickString (ColSp, pfx->token, MaxTokenLen); p = ColSp + pfx->lenToken - 1; if (*p == 'a' || *p == 'A') *p = '\0'; (void) GetPixelInfo (pfx->image, &colour); /* "gray" is both a colorspace and a named colour. */ IsGray = (LocaleCompare (ColSp, "gray") == 0) ? MagickTrue : MagickFalse; IsIcc = (LocaleCompare (ColSp, "icc-color") == 0) ? MagickTrue : MagickFalse; IsDev = (LocaleNCompare (ColSp, "device-", 7) == 0) ? MagickTrue : MagickFalse; /* QueryColorCompliance will raise a warning if it isn't a colour, so we discard any exceptions. 
*/ if (!QueryColorCompliance (pfx->token, AllCompliance, &colour, dummy_exception) || IsGray) { ssize_t type = ParseCommandOption (MagickColorspaceOptions, MagickFalse, ColSp); if (type >= 0 || IsIcc || IsDev) { char * q = pfx->pex + pfx->lenToken; while (isspace((int) ((unsigned char) *q))) q++; if (*q == '(') { size_t lenfun; char sFunc[MagickPathExtent]; while (*q && *q != ')') q++; if (!*q) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "constant color missing ')'", "at '%s'", SetShortExp(pfx)); dummy_exception = DestroyExceptionInfo (dummy_exception); return -1; } lenfun = (size_t) (q - pfx->pex + 1); if (lenfun > MaxTokenLen) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "lenfun too long", "'%lu' at '%s'", lenfun, SetShortExp(pfx)); dummy_exception = DestroyExceptionInfo (dummy_exception); return -1; } (void) CopyMagickString (sFunc, pfx->pex, lenfun+1); if (QueryColorCompliance (sFunc, AllCompliance, &colour, dummy_exception)) { *v0 = colour.red / QuantumRange; *v1 = colour.green / QuantumRange; *v2 = colour.blue / QuantumRange; dummy_exception = DestroyExceptionInfo (dummy_exception); return (ssize_t)lenfun; } } else { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "colorspace but not a valid color with '(...)' at", "'%s'", SetShortExp(pfx)); dummy_exception = DestroyExceptionInfo (dummy_exception); return -1; } } if (!IsGray) { dummy_exception = DestroyExceptionInfo (dummy_exception); return 0; } } *v0 = colour.red / QuantumRange; *v1 = colour.green / QuantumRange; *v2 = colour.blue / QuantumRange; dummy_exception = DestroyExceptionInfo (dummy_exception); return (ssize_t)strlen (pfx->token); } static ssize_t inline GetHexColour (FxInfo * pfx, fxFltType *v0, fxFltType *v1, fxFltType *v2) /* Returns number of characters to swallow. Negative return means it starts with '#', but invalid hex number. 
*/ {
  /* GetHexColour (tail; the signature is above this chunk): parse a "#hex"
     colour at pfx->pex into *v0,*v1,*v2, normalised to 0..1 by QuantumRange.
     Returns the number of characters consumed, 0 if this is not a hex
     colour, or -1 on error (exception raised). */
  char * p;
  size_t len;
  PixelInfo colour;

  if (*pfx->pex != '#') return 0;

  /* find end of hex digits. */
  p = pfx->pex + 1;
  while (isxdigit ((int)*p)) p++;
  /* A letter immediately after the digits means this was a name, not hex. */
  if (isalpha ((int)*p)) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "Bad hex number at", "'%s'",
      SetShortExp(pfx));
    return -1;
  }

  len = (size_t) (p - pfx->pex);
  if (len < 1) return 0;
  if (len >= MaxTokenLen) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "Hex colour too long at", "'%s'",
      SetShortExp(pfx));
    return -1;
  }
  (void) CopyMagickString (pfx->token, pfx->pex, len+1);

  (void) GetPixelInfo (pfx->image, &colour);

  /* Let the colour engine validate and resolve the hex string. */
  if (!QueryColorCompliance (pfx->token, AllCompliance, &colour, pfx->exception)) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "QueryColorCompliance rejected", "'%s' at '%s'",
      pfx->token, SetShortExp(pfx));
    return -1;
  }

  *v0 = colour.red / QuantumRange;
  *v1 = colour.green / QuantumRange;
  *v2 = colour.blue / QuantumRange;

  return (ssize_t) len;
}

/* Translates a function invocation such as "sin(u+1)", "p[-1,-1]" or
   "while(cond,expr)" into RPN elements.  On entry pfx->token holds the
   function name (fe is its enum value).  Consumes the argument list —
   "(...)" normally, "[...]"/"{...}" for pixel-style functions p/u/v/s —
   emitting each argument expression plus the goto/conditional addressing
   elements needed by while/do/for/if, then parses any trailing channel,
   coordinate or image-attribute qualifier ("u.r", "u.p[...]", "u.mean").
   Returns MagickFalse (with the exception set) on any parse error. */
static MagickBooleanType GetFunction (FxInfo * pfx, FunctionE fe)
{
  /* A function, so get open-parens, n args, close-parens */
  const char * funStr = Functions[fe-FirstFunc].str;
  int nArgs = Functions[fe-FirstFunc].nArgs;
  char chLimit = ')';
  char expChLimit = ')';
  const char *strLimit = ",)";
  OperatorE pushOp = oOpenParen;

  char * pExpStart;

  int lenExp = 0;

  int FndArgs = 0;
  /* Indexes of emitted elements; used below to back-patch goto targets. */
  int ndx0 = NULL_ADDRESS, ndx1 = NULL_ADDRESS, ndx2 = NULL_ADDRESS, ndx3 = NULL_ADDRESS;

  MagickBooleanType coordQual = MagickFalse;
  PixelChannel chQual = NO_CHAN_QUAL;
  ImgAttrE iaQual = aNull;

  pfx->pex += pfx->lenToken;

  /* p, u, v and s take "[...]" (relative) or "{...}" (absolute) argument
     brackets instead of "(...)", and may appear with no arguments at all. */
  if (fe == fP) {
    char p = PeekChar (pfx);
    if (p=='{') {
      (void) ExpectChar (pfx, '{');
      pushOp = oOpenBrace;
      strLimit = ",}";
      chLimit = '}';
      expChLimit = '}';
    } else if (p=='[') {
      (void) ExpectChar (pfx, '[');
      pushOp = oOpenBracket;
      strLimit = ",]";
      chLimit = ']';
      expChLimit = ']';
    } else {
      nArgs = 0;
      chLimit = ']';
      expChLimit = ']';
    }
  } else if (fe == fU) {
    char p = PeekChar (pfx);
    if (p=='[') {
      (void) ExpectChar (pfx, '[');
      pushOp = oOpenBracket;
      strLimit = ",]";
      chLimit = ']';
      expChLimit = ']';
    } else {
      nArgs = 0;
      chLimit = ']';
      expChLimit = ']';
    }
  } else if (fe == fV || fe == fS) {
    nArgs = 0;
    pushOp = oOpenBracket;
    chLimit = ']';
    expChLimit = ']';
  } else {
    if (!ExpectChar (pfx, '(')) return MagickFalse;
  }
  if (!PushOperatorStack (pfx, pushOp)) return MagickFalse;

  pExpStart = pfx->pex;
  ndx0 = pfx->usedElements;
  if (fe==fDo) {
    (void) AddAddressingElement (pfx, rGoto, NULL_ADDRESS); /* address will be ndx1+1 */
  }

  /* Translate the comma-separated argument expressions, emitting the
     flow-control skeleton for while/do/for/if as each argument is seen. */
  while (nArgs > 0) {
    int FndOne = 0;
    if (TranslateStatementList (pfx, strLimit, &chLimit)) {
      FndOne = 1;
    } else {
      /* Maybe don't break because other expressions may be not empty. */
      if (!chLimit) break;
      /* p, s and if tolerate an empty argument: substitute constant zero. */
      if (fe == fP || fe == fS|| fe == fIf) {
        (void) AddElement (pfx, (fxFltType) 0, oNull);
        FndOne = 1;
      }
    }

    if (strchr (strLimit, chLimit)==NULL) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "For function", "'%s' expected one of '%s' after expression but found '%c' at '%s'",
        funStr, strLimit, chLimit ? chLimit : ' ', SetShortExp(pfx));
      return MagickFalse;
    }
    if (FndOne) {
      FndArgs++;
      nArgs--;
    }

    switch (FndArgs) {
      case 1:
        ndx1 = pfx->usedElements;
        if (fe==fWhile) {
          (void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS); /* address will be ndx2+1 */
        } else if (fe==fDo) {
          (void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS); /* address will be ndx2+1 */
        } else if (fe==fFor) {
          pfx->Elements[pfx->usedElements-1].DoPush = MagickFalse;
        } else if (fe==fIf) {
          (void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS); /* address will be ndx2 + 1 */
          pfx->Elements[pfx->usedElements-1].DoPush = MagickTrue; /* we may need return from if() */
        }
        break;
      case 2:
        ndx2 = pfx->usedElements;
        if (fe==fWhile) {
          pfx->Elements[pfx->usedElements-1].DoPush = MagickFalse;
          (void) AddAddressingElement (pfx, rGoto, ndx0);
        } else if (fe==fDo) {
          pfx->Elements[pfx->usedElements-1].DoPush = MagickFalse;
          (void) AddAddressingElement (pfx, rGoto, ndx0 + 1);
        } else if (fe==fFor) {
          (void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS); /* address will be ndx3 */
          pfx->Elements[pfx->usedElements-1].DoPush = MagickTrue; /* we may need return from for() */
          (void) AddAddressingElement (pfx, rZerStk, NULL_ADDRESS);
        } else if (fe==fIf) {
          (void) AddAddressingElement (pfx, rGoto, NULL_ADDRESS); /* address will be ndx3 */
        }
        break;
      case 3:
        if (fe==fFor) {
          pfx->Elements[pfx->usedElements-1].DoPush = MagickFalse;
          (void) AddAddressingElement (pfx, rGoto, ndx1);
        }
        ndx3 = pfx->usedElements;
        break;
      default:
        break;
    }
    if (chLimit == expChLimit) {
      lenExp = pfx->pex - pExpStart - 1;
      break;
    }
  } /* end while args of a function */

  if (chLimit && chLimit != expChLimit && chLimit != ',' ) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "For function", "'%s' expected '%c', found '%c' at '%s'",
      funStr, expChLimit, chLimit ? chLimit : ' ', SetShortExp(pfx));
    return MagickFalse;
  }

  /* p, s and u may legitimately have fewer arguments: pad with zeros. */
  if (fe == fP || fe == fS || fe == fU) {
    while (FndArgs < Functions[fe-FirstFunc].nArgs) {
      (void) AddElement (pfx, (fxFltType) 0, oNull);
      FndArgs++;
    }
  }

  if (FndArgs > Functions[fe-FirstFunc].nArgs) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "For function", "'%s' expected %i arguments, found '%i' at '%s'",
      funStr, Functions[fe-FirstFunc].nArgs, FndArgs, SetShortExp(pfx));
    return MagickFalse;
  }
  if (FndArgs < Functions[fe-FirstFunc].nArgs) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "For function", "'%s' expected %i arguments, found too few (%i) at '%s'",
      funStr, Functions[fe-FirstFunc].nArgs, FndArgs, SetShortExp(pfx));
    return MagickFalse;
  }
  if (fe != fS && fe != fV && FndArgs == 0 && Functions[fe-FirstFunc].nArgs == 0) {
    /* This is for "rand()" and similar. */
    chLimit = expChLimit;
    if (!ExpectChar (pfx, ')')) return MagickFalse;
  }
  if (chLimit != expChLimit) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "For function", "'%s', arguments don't end with '%c' at '%s'",
      funStr, expChLimit, SetShortExp(pfx));
    return MagickFalse;
  }
  if (!PopOprOpenParen (pfx, pushOp)) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "Bug: For function", "'%s' tos not '%s' at '%s'",
      funStr, Operators[pushOp].str, SetShortExp(pfx));
    return MagickFalse;
  }

  /* Optional qualifier suffix: pixel coordinates ("u.p[...]"), a channel
     ("u.r") or an image attribute ("u.mean"). */
  if (IsQualifier (pfx)) {

    if (fe == fU || fe == fV || fe == fS) {

      coordQual = (GetCoordQualifier (pfx, fe) == 1) ? MagickTrue : MagickFalse;

      if (coordQual) {

        /* Remove last element, which should be fP */
        ElementT * pel = &pfx->Elements[pfx->usedElements-1];
        if (pel->oprNum != fP) {
          (void) ThrowMagickException (
            pfx->exception, GetMagickModule(), OptionError,
            "Bug: For function", "'%s' last element not 'p' at '%s'",
            funStr, SetShortExp(pfx));
          return MagickFalse;
        }
        chQual = pel->ChannelQual;
        expChLimit = (pel->IsRelative) ? ']' : '}';
        pfx->usedElements--;
        /* Switch to the "u/v/s at explicit pixel coordinates" variants. */
        if (fe == fU) fe = fUP;
        else if (fe == fV) fe = fVP;
        else if (fe == fS) fe = fSP;
        funStr = Functions[fe-FirstFunc].str;
      }
    }

    if ( chQual == NO_CHAN_QUAL &&
         (fe == fP || fe == fS || fe == fSP || fe == fU || fe == fUP || fe == fV || fe == fVP)
       )
    {
      chQual = GetChannelQualifier (pfx, fe);
    }

    if (chQual == NO_CHAN_QUAL && (fe == fU || fe == fV || fe == fS)) {
      /* Note: we don't allow "p.mean" etc. */
      iaQual = GetImgAttrQualifier (pfx, fe);
    }
    /* An attribute qualifier may itself carry a channel, eg "u.mean.r". */
    if (IsQualifier (pfx) && chQual == NO_CHAN_QUAL && iaQual != aNull) {
      chQual = GetChannelQualifier (pfx, fe);
    }
    if (coordQual && iaQual != aNull) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "For function", "'%s', can't have qualifiers 'p' and image attribute '%s' at '%s'",
        funStr, pfx->token, SetShortExp(pfx));
      return MagickFalse;
    }
    if (!coordQual && chQual == NO_CHAN_QUAL && iaQual == aNull) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "For function", "'%s', bad qualifier '%s' at '%s'",
        funStr, pfx->token, SetShortExp(pfx));
      return MagickFalse;
    }
    if (!coordQual && chQual == CompositePixelChannel && iaQual == aNull) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "For function", "'%s', bad composite qualifier '%s' at '%s'",
        funStr, pfx->token, SetShortExp(pfx));
      return MagickFalse;
    }

    if (chQual == HUE_CHANNEL || chQual == SAT_CHANNEL || chQual == LIGHT_CHANNEL) {
      pfx->NeedHsl = MagickTrue;

      if (iaQual >= FirstImgAttr && iaQual < aNull) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Can't have image attribute with HLS qualifier at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      }
    }
  }

  /* Back-patch the goto/conditional addresses recorded while the
     arguments were being emitted; otherwise emit the function itself. */
  if (fe==fWhile) {
    pfx->Elements[ndx1].EleNdx = ndx2+1;
  } else if (fe==fDo) {
    pfx->Elements[ndx0].EleNdx = ndx1+1;
    pfx->Elements[ndx1].EleNdx = ndx2+1;
  } else if (fe==fFor) {
    pfx->Elements[ndx2].EleNdx = ndx3;
  } else if (fe==fIf) {
    pfx->Elements[ndx1].EleNdx = ndx2 + 1;
    pfx->Elements[ndx2].EleNdx = ndx3;
  } else {
    /* "u[const 0]" collapses to the cheaper fU0 opcode. */
    if (fe == fU && iaQual == aNull) {
      ElementT * pel = &pfx->Elements[pfx->usedElements-1];
      if (pel->type == etConstant && pel->val == 0.0) {
        pfx->usedElements--;
        fe = fU0;
      }
    }
    (void) AddElement (pfx, (fxFltType) 0, fe);
    if (fe == fP || fe == fU || fe == fU0 || fe == fUP ||
        fe == fV || fe == fVP || fe == fS || fe == fSP)
    {
      ElementT * pel = &pfx->Elements[pfx->usedElements-1];
      pel->IsRelative = (expChLimit == ']' ? MagickTrue : MagickFalse);
      if (chQual >= 0) pel->ChannelQual = chQual;
      if (iaQual != aNull && (fe == fU || fe == fV || fe == fS)) {
        /* Note: we don't allow "p[2,3].mean" or "p.mean" etc. */
        pel->ImgAttrQual = iaQual;
      }
    }
  }

  /* Remember the source text of the argument list (used by fDebug output). */
  if (pExpStart && lenExp) {
    ElementT * pel = &pfx->Elements[pfx->usedElements-1];
    pel->pExpStart = pExpStart;
    pel->lenExp = lenExp;
  }

  if (fe == fDebug)
    pfx->ContainsDebug = MagickTrue;

  return MagickTrue;
}

/* Returns MagickTrue for pseudo-functions that users may not write directly:
   internal variants (fU0/fUP/fSP/fVP) and the addressing/flow opcodes. */
static MagickBooleanType IsStealth (int op)
{
  return (op == fU0 || op == fUP || op == fSP || op == fVP ||
          (op >= FirstCont && op <= rNull) ?
MagickTrue : MagickFalse );
}

/* Parses one operand at pfx->pex and (usually) emits it as an element:
   a parenthesised sub-expression, unary prefix, hex or named colour,
   numeric constant (with optional metric/binary suffix), property,
   named constant, function call, image attribute, symbol, image artifact,
   or a user symbol (user symbols are recorded but NOT emitted here).
   Outputs: *UserSymbol/*NewUserSymbol flags and *UserSymNdx; *needPopAll
   is set when a flow-control function requires a stack purge.
   Returns MagickFalse on error (exception raised). */
static MagickBooleanType GetOperand (
  FxInfo * pfx, MagickBooleanType * UserSymbol, MagickBooleanType * NewUserSymbol, int * UserSymNdx,
  MagickBooleanType * needPopAll)
{
  *NewUserSymbol = *UserSymbol = MagickFalse;
  *UserSymNdx = NULL_ADDRESS;

  SkipSpaces (pfx);
  if (!*pfx->pex) return MagickFalse;
  (void) GetToken (pfx);

  if (pfx->lenToken==0) {

    /* Try '(' or unary prefix */
    OperatorE op = GetLeadingOp (pfx);
    if (op==oOpenParen) {
      char chLimit = '\0';
      if (!PushOperatorStack (pfx, op)) return MagickFalse;
      pfx->pex++;
      if (!TranslateExpression (pfx, ")", &chLimit, needPopAll)) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Empty expression in parentheses at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      }
      if (chLimit != ')') {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "'(' but no ')' at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      }
      /* Top of opr stack should be '('. */
      if (!PopOprOpenParen (pfx, oOpenParen)) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Bug: tos not '(' at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      }
      return MagickTrue;
    } else if (OprIsUnaryPrefix (op)) {
      if (!PushOperatorStack (pfx, op)) return MagickFalse;
      pfx->pex++;
      SkipSpaces (pfx);
      if (!*pfx->pex) return MagickFalse;

      /* Recurse for the operand the unary prefix applies to. */
      if (!GetOperand (pfx, UserSymbol, NewUserSymbol, UserSymNdx, needPopAll)) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "After unary, bad operand at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      }
      if (*NewUserSymbol) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "After unary, NewUserSymbol at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      }
      if (*UserSymbol) {
        /* Materialise the symbol's value now so the unary op can apply. */
        (void) AddAddressingElement (pfx, rCopyFrom, *UserSymNdx);
        *UserSymNdx = NULL_ADDRESS;

        *UserSymbol = MagickFalse;
        *NewUserSymbol = MagickFalse;
      }
      (void) GetToken (pfx);
      return MagickTrue;
    } else if (*pfx->pex == '#') {
      fxFltType v0=0, v1=0, v2=0;
      ssize_t lenToken = GetHexColour (pfx, &v0, &v1, &v2);
      if (lenToken < 0) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Bad hex number at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      } else if (lenToken > 0) {
        (void) AddColourElement (pfx, v0, v1, v2);
        pfx->pex+=lenToken;
      }
      return MagickTrue;
    }

    /* Try a constant number. */
    {
      char * tailptr;
      ssize_t lenOptArt;
      fxFltType val = strtold (pfx->pex, &tailptr);
      if (pfx->pex != tailptr) {
        pfx->pex = tailptr;
        if (*tailptr) {
          /* Could have "prefix" K, Ki, M etc.
             See https://en.wikipedia.org/wiki/Metric_prefix and
             https://en.wikipedia.org/wiki/Binary_prefix */
          double Pow = 0.0;
          /* Index in Prefices maps to a power of ten: 'y'=-24 ... 'Y'=+24. */
          const char Prefices[] = "yzafpnum.kMGTPEZY";
          const char * pSi = strchr (Prefices, *tailptr);
          if (pSi && *pSi != '.') Pow = (pSi - Prefices) * 3 - 24;
          else if (*tailptr == 'c') Pow = -2;
          else if (*tailptr == 'h') Pow = 2;
          else if (*tailptr == 'k') Pow = 3;
          if (Pow != 0.0) {
            /* Trailing 'i' selects the binary prefix: 10^Pow -> 2^(Pow/0.3). */
            if (*(++pfx->pex) == 'i') {
              val *= pow (2.0, Pow/0.3);
              pfx->pex++;
            } else {
              val *= pow (10.0, Pow);
            }
          }
        }
        (void) AddElement (pfx, val, oNull);
        return MagickTrue;
      }

      val = (fxFltType) 0;
      lenOptArt = GetProperty (pfx, &val);
      if (lenOptArt < 0) return MagickFalse;
      if (lenOptArt > 0) {
        (void) AddElement (pfx, val, oNull);
        pfx->pex += lenOptArt;
        return MagickTrue;
      }
    }

  } /* end of lenToken==0 */

  if (pfx->lenToken > 0) {
    /* Try a constant */
    {
      ConstantE ce;
      for (ce = (ConstantE)0; ce < cNull; ce=(ConstantE) (ce+1)) {
        const char * ceStr = Constants[ce].str;
        if (LocaleCompare (ceStr, pfx->token)==0) {
          break;
        }
      }

      if (ce != cNull) {
        (void) AddElement (pfx, Constants[ce].val, oNull);
        pfx->pex += pfx->lenToken;
        return MagickTrue;
      }
    }

    /* Try a function */
    {
      FunctionE fe;
      for (fe = FirstFunc; fe < fNull; fe=(FunctionE) (fe+1)) {
        const char * feStr = Functions[fe-FirstFunc].str;
        if (LocaleCompare (feStr, pfx->token)==0) {
          break;
        }
      }

      if (fe == fV && pfx->ImgListLen < 2) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Symbol 'v' but fewer than two images at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      }

      /* NOTE(review): unlike the other rejections in this function, this
         throw does not "return MagickFalse"; translation continues and the
         exception is only detected later by the severity check in
         TranslateExpression — confirm this is intended. */
      if (IsStealth (fe)) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Function", "'%s' not permitted at '%s'",
          pfx->token, SetShortExp(pfx));
      }

      if (fe == fDo || fe == fFor || fe == fIf || fe == fWhile) {
        *needPopAll = MagickTrue;
      }

      if (fe != fNull) return (GetFunction (pfx, fe));
    }

    /* Try image attribute */
    {
      ImgAttrE ia = GetImgAttrToken (pfx);
      if (ia != aNull) {
        fxFltType val = 0;
        (void) AddElement (pfx, val, ia);

        if (ImgAttrs[ia-FirstImgAttr].NeedStats==1) {
          if (IsQualifier (pfx)) {
            PixelChannel chQual = GetChannelQualifier (pfx, ia);
            ElementT * pel;
            if (chQual == NO_CHAN_QUAL) {
              (void) ThrowMagickException (
                pfx->exception, GetMagickModule(), OptionError,
                "Bad channel qualifier at", "'%s'",
                SetShortExp(pfx));
              return MagickFalse;
            }
            /* Adjust the element */
            pel = &pfx->Elements[pfx->usedElements-1];
            pel->ChannelQual = chQual;
          }
        }
        return MagickTrue;
      }
    }

    /* Try symbol */
    {
      SymbolE se;
      for (se = FirstSym; se < sNull; se=(SymbolE) (se+1)) {
        const char * seStr = Symbols[se-FirstSym].str;
        if (LocaleCompare (seStr, pfx->token)==0) {
          break;
        }
      }
      if (se != sNull) {
        fxFltType val = 0;
        (void) AddElement (pfx, val, se);
        pfx->pex += pfx->lenToken;

        if (se==sHue || se==sSaturation || se==sLightness) pfx->NeedHsl = MagickTrue;
        return MagickTrue;
      }
    }

    /* Try constant colour. */
    {
      fxFltType v0, v1, v2;
      ssize_t ColLen = GetConstantColour (pfx, &v0, &v1, &v2);
      if (ColLen < 0) return MagickFalse;
      if (ColLen > 0) {
        (void) AddColourElement (pfx, v0, v1, v2);
        pfx->pex+=ColLen;
        return MagickTrue;
      }
    }

    /* Try image artifact. */
    {
      const char *artifact;
      artifact = GetImageArtifact (pfx->image, pfx->token);
      if (artifact != (const char *) NULL) {
        char * tailptr;
        fxFltType val = strtold (artifact, &tailptr);
        if (pfx->token == tailptr) {
          (void) ThrowMagickException (
            pfx->exception, GetMagickModule(), OptionError,
            "Artifact", "'%s' has value '%s', not a number, at '%s'",
            pfx->token, artifact, SetShortExp(pfx));
          return MagickFalse;
        }
        (void) AddElement (pfx, val, oNull);
        pfx->pex+=pfx->lenToken;
        return MagickTrue;
      }
    }

    /* Try user symbols. If it is, don't AddElement yet. */
    if (TokenMaybeUserSymbol (pfx)) {
      *UserSymbol = MagickTrue;
      *UserSymNdx = FindUserSymbol (pfx, pfx->token);
      if (*UserSymNdx == NULL_ADDRESS) {
        *UserSymNdx = AddUserSymbol (pfx, pfx->pex, pfx->lenToken);
        *NewUserSymbol = MagickTrue;
      } else {
        /* Existing symbol: nothing to emit at this point. */
      }
      pfx->pex += pfx->lenToken;

      return MagickTrue;
    }
  }

  (void) ThrowMagickException (
    pfx->exception, GetMagickModule(), OptionError,
    "Expected operand at", "'%s'",
    SetShortExp(pfx));

  return MagickFalse;
}

/* True for genuine computational operators, ie anything outside the
   bracket pseudo-operator range oOpenParen..oCloseBrace. */
static MagickBooleanType inline IsRealOperator (OperatorE op)
{
  return (op < oOpenParen || op > oCloseBrace) ? MagickTrue : MagickFalse;
}

static MagickBooleanType inline ProcessTernaryOpr (FxInfo * pfx, TernaryT * ptern)
/* Ternary operator "... ? ... : ..."
   returns false iff we have exception
*/
{
  if (pfx->usedOprStack == 0)
    return MagickFalse;
  if (pfx->OperatorStack[pfx->usedOprStack-1] == oQuery) {
    /* Only one '?' (and one ':') is allowed per sub-expression. */
    if (ptern->addrQuery != NULL_ADDRESS) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Already have '?' in sub-expression at", "'%s'",
        SetShortExp(pfx));
      return MagickFalse;
    }
    if (ptern->addrColon != NULL_ADDRESS) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Already have ':' in sub-expression at", "'%s'",
        SetShortExp(pfx));
      return MagickFalse;
    }
    pfx->usedOprStack--;
    ptern->addrQuery = pfx->usedElements;
    (void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS);
    /* address will be one after the Colon address.
*/
  } else if (pfx->OperatorStack[pfx->usedOprStack-1] == oColon) {
    if (ptern->addrQuery == NULL_ADDRESS) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Need '?' in sub-expression at", "'%s'",
        SetShortExp(pfx));
      return MagickFalse;
    }
    if (ptern->addrColon != NULL_ADDRESS) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Already have ':' in sub-expression at", "'%s'",
        SetShortExp(pfx));
      return MagickFalse;
    }
    pfx->usedOprStack--;
    ptern->addrColon = pfx->usedElements;
    /* Keep the "true" branch's result on the stack before jumping out. */
    pfx->Elements[pfx->usedElements-1].DoPush = MagickTrue;
    (void) AddAddressingElement (pfx, rGoto, NULL_ADDRESS); /* address will be after the subexpression */
  }
  return MagickTrue;
}

/* Scans the operator at pfx->pex (longest match comes from table order),
   reports its class via *Assign / *Update (in-place ops) / *IncrDecr, and
   applies the shunting-yard rule: pop higher-or-equal precedence operators
   from the stack to the element list, then push the new operator (or pop a
   matching '(' for ')').  Returns MagickFalse on error. */
static MagickBooleanType GetOperator (
  FxInfo * pfx,
  MagickBooleanType * Assign, MagickBooleanType * Update, MagickBooleanType * IncrDecr)
{
  OperatorE op;
  size_t len = 0;
  MagickBooleanType DoneIt = MagickFalse;
  SkipSpaces (pfx);
  for (op = (OperatorE)0; op != oNull; op=(OperatorE) (op+1)) {
    const char * opStr = Operators[op].str;
    len = strlen(opStr);
    if (LocaleNCompare (opStr, pfx->pex, len)==0) {
      break;
    }
  }

  if (!IsRealOperator (op)) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "Not a real operator at", "'%s'",
      SetShortExp(pfx));
    return MagickFalse;
  }

  if (op==oNull) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "Expected operator at", "'%s'",
      SetShortExp(pfx));
    return MagickFalse;
  }

  *Assign = (op==oAssign) ? MagickTrue : MagickFalse;
  *Update = OprInPlace (op);
  *IncrDecr = (op == oPlusPlus || op == oSubSub) ? MagickTrue : MagickFalse;

  /* while top of OperatorStack is not empty and is not open-parens or assign,
       and top of OperatorStack is higher precedence than new op,
     then move top of OperatorStack to Element list.
  */

  while (pfx->usedOprStack > 0) {
    OperatorE top = pfx->OperatorStack[pfx->usedOprStack-1];
    int precTop, precNew;
    if (top == oOpenParen || top == oAssign || OprInPlace (top)) break;
    precTop = Operators[top].precedence;
    precNew = Operators[op].precedence;
    /* Assume left associativity.
       If right assoc, this would be "<=".
    */
    if (precTop < precNew) break;
    (void) AddElement (pfx, (fxFltType) 0, top);
    pfx->usedOprStack--;
  }

  /* If new op is close paren, and stack top is open paren,
     remove stack top.
  */
  if (op==oCloseParen) {
    if (pfx->usedOprStack == 0) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Found ')' but nothing on stack at", "'%s'",
        SetShortExp(pfx));
      return MagickFalse;
    }

    if (pfx->OperatorStack[pfx->usedOprStack-1] != oOpenParen) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Found ')' but no '(' on stack at", "'%s'",
        SetShortExp(pfx));
      return MagickFalse;
    }
    pfx->usedOprStack--;
    DoneIt = MagickTrue;
  }

  if (!DoneIt) {
    if (!PushOperatorStack (pfx, op)) return MagickFalse;
  }

  pfx->pex += len;

  return MagickTrue;
}

/* Back-patches the rIfZeroGoto/rGoto placeholders recorded for a completed
   "? :" pair; throws if only one half of the ternary was seen. */
static MagickBooleanType ResolveTernaryAddresses (FxInfo * pfx, TernaryT * ptern)
{
  if (ptern->addrQuery == NULL_ADDRESS && ptern->addrColon == NULL_ADDRESS)
    return MagickTrue;

  if (ptern->addrQuery != NULL_ADDRESS && ptern->addrColon != NULL_ADDRESS) {
    /* '?' jumps just past the ':' goto; ':' goto jumps past the whole
       sub-expression (the current end of the element list). */
    pfx->Elements[ptern->addrQuery].EleNdx = ptern->addrColon + 1;
    pfx->Elements[ptern->addrColon].EleNdx = pfx->usedElements;
    ptern->addrQuery = NULL_ADDRESS;
    ptern->addrColon = NULL_ADDRESS;
  } else if (ptern->addrQuery != NULL_ADDRESS) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "'?' 
with no corresponding ':'", "'%s' at '%s'",
        pfx->token, SetShortExp(pfx));
      return MagickFalse;
  } else if (ptern->addrColon != NULL_ADDRESS) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "':' with no corresponding '?'", "'%s' at '%s'",
        pfx->token, SetShortExp(pfx));
      return MagickFalse;
  }
  return MagickTrue;
}

/* Translates one expression — an alternating Operand, Operator, Operand, ...
   sequence — up to (but consuming) a terminator listed in strLimit, which is
   reported via *chLimit.  Handles assignment and in-place update to user
   symbols, unary prefixes, '++'/'--' (final position only) and the ternary
   operator; finally flushes the operator stack and resolves ternary
   addresses.  Returns MagickFalse on error or empty expression. */
static MagickBooleanType TranslateExpression (
  FxInfo * pfx, const char * strLimit, char * chLimit, MagickBooleanType * needPopAll)
{
  /* There should be only one New per expression (oAssign), but can be many Old.
  */
  MagickBooleanType UserSymbol, NewUserSymbol;
  int UserSymNdx0, UserSymNdx1;

  MagickBooleanType
    Assign = MagickFalse,
    Update = MagickFalse,
    IncrDecr = MagickFalse;

  int StartEleNdx;

  TernaryT ternary;
  ternary.addrQuery = NULL_ADDRESS;
  ternary.addrColon = NULL_ADDRESS;

  pfx->teDepth++;

  *chLimit = '\0';

  StartEleNdx = pfx->usedElements-1;
  if (StartEleNdx < 0) StartEleNdx = 0;

  SkipSpaces (pfx);

  if (!*pfx->pex) {
    pfx->teDepth--;
    return MagickFalse;
  }

  /* An immediate terminator means an empty expression. */
  if (strchr(strLimit,*pfx->pex)!=NULL) {
    *chLimit = *pfx->pex;
    pfx->pex++;
    pfx->teDepth--;

    return MagickFalse;
  }

  if (!GetOperand (pfx, &UserSymbol, &NewUserSymbol, &UserSymNdx0, needPopAll)) return MagickFalse;
  SkipSpaces (pfx);

  /* Loop through Operator, Operand, Operator, Operand, ...
  */
  while (*pfx->pex && (!*strLimit || (strchr(strLimit,*pfx->pex)==NULL))) {
    if (!GetOperator (pfx, &Assign, &Update, &IncrDecr)) return MagickFalse;
    SkipSpaces (pfx);
    if (NewUserSymbol && !Assign) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Expected assignment after new UserSymbol", "'%s' at '%s'",
        pfx->token, SetShortExp(pfx));
      return MagickFalse;
    }
    if (!UserSymbol && Assign) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Attempted assignment to non-UserSymbol", "'%s' at '%s'",
        pfx->token, SetShortExp(pfx));
      return MagickFalse;
    }
    if (!UserSymbol && Update) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Attempted update to non-UserSymbol", "'%s' at '%s'",
        pfx->token, SetShortExp(pfx));
      return MagickFalse;
    }
    if (UserSymbol && (Assign || Update) && !IncrDecr) {
      /* Right-hand side of an assignment/update: recurse. */
      if (!TranslateExpression (pfx, strLimit, chLimit, needPopAll)) return MagickFalse;
      if (!*pfx->pex) break;
      if (!*strLimit) break;
      if (strchr(strLimit,*chLimit)!=NULL) break;
    }
    if (UserSymbol && !Assign && !Update && UserSymNdx0 != NULL_ADDRESS) {
      /* Plain read of a user symbol: emit the deferred rCopyFrom now. */
      ElementT * pel;
      (void) AddAddressingElement (pfx, rCopyFrom, UserSymNdx0);
      UserSymNdx0 = NULL_ADDRESS;
      pel = &pfx->Elements[pfx->usedElements-1];
      pel->DoPush = MagickTrue;
    }

    if (UserSymbol) {
      while (TopOprIsUnaryPrefix (pfx)) {
        OperatorE op = pfx->OperatorStack[pfx->usedOprStack-1];
        (void) AddElement (pfx, (fxFltType) 0, op);
        pfx->usedOprStack--;
      }
    }

    if (!ProcessTernaryOpr (pfx, &ternary)) return MagickFalse;

    if (ternary.addrColon != NULL_ADDRESS) {
      /* Just processed the ':' — translate the "false" branch. */
      if (!TranslateExpression (pfx, ",);", chLimit, needPopAll)) return MagickFalse;
      break;
    }

    UserSymbol = NewUserSymbol = MagickFalse;

    if ( (!*pfx->pex) || (*strLimit && (strchr(strLimit,*pfx->pex)!=NULL) ) ) {
      if (IncrDecr) break;

      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Expected operand after operator", "at '%s'",
        SetShortExp(pfx));
      return MagickFalse;
    }

    if (IncrDecr) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "'++' and '--' must be the final operators in an expression at", "'%s'",
        SetShortExp(pfx));
      return MagickFalse;
    }

    if (!GetOperand (pfx, &UserSymbol, &NewUserSymbol, &UserSymNdx1, needPopAll)) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Expected operand at", "'%s'",
        SetShortExp(pfx));
      return MagickFalse;
    }
    SkipSpaces (pfx);
    if (NewUserSymbol && !Assign) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "NewUserSymbol", "'%s' after non-assignment operator at '%s'",
        pfx->token, SetShortExp(pfx));
      return MagickFalse;
    }
    if (UserSymbol && !NewUserSymbol) {
      (void) AddAddressingElement (pfx, rCopyFrom, UserSymNdx1);
      UserSymNdx1 = NULL_ADDRESS;
    }
    UserSymNdx0 = UserSymNdx1;
  }

  /* Expression ended on a bare user-symbol read: emit its rCopyFrom. */
  if (UserSymbol && !Assign && !Update && UserSymNdx0 != NULL_ADDRESS) {
    ElementT * pel;
    if (NewUserSymbol) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "NewUserSymbol", "'%s' needs assignment operator at '%s'",
        pfx->token, SetShortExp(pfx));
      return MagickFalse;
    }
    (void) AddAddressingElement (pfx, rCopyFrom, UserSymNdx0);
    pel = &pfx->Elements[pfx->usedElements-1];
    pel->DoPush = MagickTrue;
  }

  if (*pfx->pex && !*chLimit && (strchr(strLimit,*pfx->pex)!=NULL)) {
    *chLimit = *pfx->pex;
    pfx->pex++;
  }

  /* Flush remaining operators; assignment/update become addressing ops. */
  while (pfx->usedOprStack) {
    OperatorE op = pfx->OperatorStack[pfx->usedOprStack-1];

    if (op == oOpenParen || op == oOpenBracket || op == oOpenBrace) {
      break;
    }
    if ( (op==oAssign && !Assign) || (OprInPlace(op) && !Update) ) {
      break;
    }
    pfx->usedOprStack--;
    (void) AddElement (pfx, (fxFltType) 0, op);
    if (op == oAssign) {
      if (UserSymNdx0 < 0) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Assignment to unknown user symbol at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      }
      /* Adjust last element, by deletion and add.
      */
      pfx->usedElements--;
      (void) AddAddressingElement (pfx, rCopyTo, UserSymNdx0);
      break;
    } else if (OprInPlace (op)) {
      if (UserSymNdx0 < 0) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Operator-in-place to unknown user symbol at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      }
      /* Modify latest element.
      */
      pfx->Elements[pfx->usedElements-1].EleNdx = UserSymNdx0;
      break;
    }
  }

  (void) ResolveTernaryAddresses (pfx, &ternary);

  pfx->teDepth--;

  if (!pfx->teDepth && *needPopAll) {
    (void) AddAddressingElement (pfx, rZerStk, NULL_ADDRESS);
    *needPopAll = MagickFalse;
  }

  if (pfx->exception->severity != UndefinedException)
    return MagickFalse;

  return MagickTrue;
}

/* Translates one statement (an expression terminated by a char in strLimit);
   when the statement ends in ';' its result is discarded (DoPush cleared). */
static MagickBooleanType TranslateStatement (FxInfo * pfx, char * strLimit, char * chLimit)
{
  MagickBooleanType NeedPopAll = MagickFalse;

  SkipSpaces (pfx);

  if (!*pfx->pex) return MagickFalse;

  if (!TranslateExpression (pfx, strLimit, chLimit, &NeedPopAll)) {
    return MagickFalse;
  }
  if (pfx->usedElements && *chLimit==';') {
    /* FIXME: not necessarily the last element,
       but the last _executed_ element, eg "goto" in a "for()".,
       Pending a fix, we will use rZerStk.
*/
    ElementT * pel = &pfx->Elements[pfx->usedElements-1];
    if (pel->DoPush) pel->DoPush = MagickFalse;
  }
  return MagickTrue;
}

/* Translates a ';'-separated list of statements until a character in
   strLimit (or end of input) terminates it; the terminator is returned in
   *chLimit.  Returns MagickFalse on error or when nothing was translated. */
static MagickBooleanType TranslateStatementList (FxInfo * pfx, const char * strLimit, char * chLimit)
{
#define MAX_SLIMIT 10
  char sLimits[MAX_SLIMIT];
  SkipSpaces (pfx);

  if (!*pfx->pex) return MagickFalse;
  (void) CopyMagickString (sLimits, strLimit, MAX_SLIMIT-1);

  /* ';' is always a statement separator, even if the caller omitted it. */
  if (strchr(strLimit,';')==NULL)
    (void) ConcatenateMagickString (sLimits, ";", MAX_SLIMIT);

  for (;;) {
    if (!TranslateStatement (pfx, sLimits, chLimit)) return MagickFalse;

    if (!*pfx->pex) break;

    if (*chLimit != ';') {
      break;
    }
  }

  if (pfx->exception->severity != UndefinedException)
    return MagickFalse;

  return MagickTrue;
}

/*--------------------------------------------------------------------
   Run-time
*/

/* Gathers per-channel statistics for one image, scaled from quantum range
   to 0..1 by QuantumScale.  Caller owns (and must relinquish) the result. */
static ChannelStatistics *CollectOneImgStats (FxInfo * pfx, Image * img)
{
  int ch;
  ChannelStatistics * cs = GetImageStatistics (img, pfx->exception);
  /* Use RelinquishMagickMemory() somewhere. */

  /* NOTE(review): "<=" assumes the statistics array has
     MaxPixelChannels+1 entries (composite slot included) — confirm. */
  for (ch=0; ch <= (int) MaxPixelChannels; ch++) {
    cs[ch].mean *= QuantumScale;
    cs[ch].median *= QuantumScale;
    cs[ch].maxima *= QuantumScale;
    cs[ch].minima *= QuantumScale;
    cs[ch].standard_deviation *= QuantumScale;
    cs[ch].kurtosis *= QuantumScale;
    cs[ch].skewness *= QuantumScale;
    cs[ch].entropy *= QuantumScale;
  }

  return cs;
}

/* Collects statistics for every image in the list into pfx->statistics and
   sets pfx->GotStats.  Returns MagickFalse on allocation failure. */
static MagickBooleanType CollectStatistics (FxInfo * pfx)
{
  Image * img = GetFirstImageInList (pfx->image);

  size_t imgNum=0;

  pfx->statistics = (ChannelStatistics**) AcquireMagickMemory (pfx->ImgListLen * sizeof (ChannelStatistics *));
  if (!pfx->statistics) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), ResourceLimitFatalError,
      "Statistics", "%lu",
      pfx->ImgListLen);
    return MagickFalse;
  }

  for (;;) {
    pfx->statistics[imgNum] = CollectOneImgStats (pfx, img);

    if (++imgNum == pfx->ImgListLen) break;
    img = GetNextImageInList (img);
    assert (img != (Image *) NULL);
  }
  pfx->GotStats = MagickTrue;

  return MagickTrue;
}

/* Pushes val onto the run-time value stack; throws and returns MagickFalse
   on overflow (addr is the element index, for diagnostics only). */
static MagickBooleanType inline PushVal (FxInfo * pfx, fxRtT * pfxrt, fxFltType val, int addr)
{
  if (pfxrt->usedValStack >=pfxrt->numValStack) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "ValStack overflow at addr=", "%i",
      addr);
    return MagickFalse;
  }

  pfxrt->ValStack[pfxrt->usedValStack++] = val;
  return MagickTrue;
}

/* Pops the run-time value stack; throws and returns 0 on underflow. */
static inline fxFltType PopVal (FxInfo * pfx, fxRtT * pfxrt, int addr)
{
  if (pfxrt->usedValStack <= 0) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "ValStack underflow at addr=", "%i",
      addr);
    return (fxFltType) 0;
  }
  return pfxrt->ValStack[--pfxrt->usedValStack];
}

/* Returns the value of image attribute ia (channel-qualified where the
   attribute is statistical) for image ImgNum.  Uses cached statistics when
   available, otherwise collects them on demand for this call only. */
static inline fxFltType ImageStat (
  FxInfo * pfx, ssize_t ImgNum, PixelChannel channel, ImgAttrE ia)
{
  ChannelStatistics * cs = NULL;
  fxFltType ret = 0;
  MagickBooleanType NeedRelinq = MagickFalse;

  assert (channel >= 0 && channel <= MaxPixelChannels);

  if (pfx->GotStats) {
    cs = pfx->statistics[ImgNum];
  } else if (pfx->NeedStats) {
    /* If we need more than one statistic per pixel, this is inefficient.
*/
    cs = CollectOneImgStats (pfx, pfx->Images[ImgNum]);
    NeedRelinq = MagickTrue;
  }

  switch (ia) {
    case aDepth:
      ret = (fxFltType) GetImageDepth (pfx->Images[ImgNum], pfx->exception);
      break;
    case aExtent:
      ret = (fxFltType) GetBlobSize (pfx->image);
      break;
    case aKurtosis:
      ret = cs[channel].kurtosis;
      break;
    case aMaxima:
      ret = cs[channel].maxima;
      break;
    case aMean:
      ret = cs[channel].mean;
      break;
    case aMedian:
      ret = cs[channel].median;
      break;
    case aMinima:
      ret = cs[channel].minima;
      break;
    case aPage:
      /* Do nothing */
      break;
    case aPageX:
      ret = (fxFltType) pfx->Images[ImgNum]->page.x;
      break;
    case aPageY:
      ret = (fxFltType) pfx->Images[ImgNum]->page.y;
      break;
    case aPageWid:
      ret = (fxFltType) pfx->Images[ImgNum]->page.width;
      break;
    case aPageHt:
      ret = (fxFltType) pfx->Images[ImgNum]->page.height;
      break;
    case aPrintsize:
      /* Do nothing */
      break;
    case aPrintsizeX:
      /* columns / resolution, guarded against divide-by-zero. */
      ret = (fxFltType) PerceptibleReciprocal (pfx->Images[ImgNum]->resolution.x)
        * pfx->Images[ImgNum]->columns;
      break;
    case aPrintsizeY:
      ret = (fxFltType) PerceptibleReciprocal (pfx->Images[ImgNum]->resolution.y)
        * pfx->Images[ImgNum]->rows;
      break;
    case aQuality:
      ret = (fxFltType) pfx->Images[ImgNum]->quality;
      break;
    case aRes:
      /* Do nothing */
      break;
    case aResX:
      ret = pfx->Images[ImgNum]->resolution.x;
      break;
    case aResY:
      ret = pfx->Images[ImgNum]->resolution.y;
      break;
    case aSkewness:
      ret = cs[channel].skewness;
      break;
    case aStdDev:
      ret = cs[channel].standard_deviation;
      break;
    case aH:
      ret = (fxFltType) pfx->Images[ImgNum]->rows;
      break;
    case aN:
      ret = (fxFltType) pfx->ImgListLen;
      break;
    case aT: /* image index in list */
      ret = (fxFltType) ImgNum;
      break;
    case aW:
      ret = (fxFltType) pfx->Images[ImgNum]->columns;
      break;
    case aZ:
      ret = (fxFltType) GetImageDepth (pfx->Images[ImgNum], pfx->exception);
      break;
    default:
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Unknown ia=", "%i",
        ia);
  }
  if (NeedRelinq) cs = (ChannelStatistics *)RelinquishMagickMemory (cs);

  return ret;
}

/* Greatest common divisor of x and y via Euclid's algorithm on floating
   point, with a recursion-depth cap as a safety net. */
static fxFltType inline FxGcd (fxFltType x, fxFltType y, const size_t depth)
{
#define FxMaxFunctionDepth 200

  if (x < y)
    return (FxGcd (y, x, depth+1));
  if ((fabs((double) y) < 0.001) || (depth >= FxMaxFunctionDepth))
    return (x);
  return (FxGcd (y, x-y*floor((double) (x/y)), depth+1));
}

/* Rounds f to an image index; negative values count from the end of the
   list.  Throws and returns -1 when the index is out of range. */
static ssize_t inline ChkImgNum (FxInfo * pfx, fxFltType f)
/* Returns -1 if f is too large. */
{
  ssize_t i = (ssize_t) floor ((double) f + 0.5);
  if (i < 0) i += pfx->ImgListLen;
  if (i < 0 || i >= (ssize_t)pfx->ImgListLen) {
    /* NOTE(review): "%lu" prints the signed index i; a still-negative i
       will display as a huge unsigned value in the message. */
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "ImgNum", "%lu bad for ImgListLen %lu",
      i, pfx->ImgListLen);
    i = -1;
  }
  return i;
}

/* Channel to read for a statistical attribute: composite unless qualified. */
#define WHICH_ATTR_CHAN \
  (pel->ChannelQual == NO_CHAN_QUAL) ? CompositePixelChannel : \
  (pel->ChannelQual == THIS_CHANNEL) ? channel : pel->ChannelQual

/* Channel to read for pixel data: falls back to red for composite. */
#define WHICH_NON_ATTR_CHAN \
  (pel->ChannelQual == NO_CHAN_QUAL || \
   pel->ChannelQual == THIS_CHANNEL || \
   pel->ChannelQual == CompositePixelChannel \
  ) ? (channel == CompositePixelChannel ? RedPixelChannel: channel) \
    : pel->ChannelQual

/* Returns hue, saturation or lightness (selected by 'channel', one of
   HUE_CHANNEL/SAT_CHANNEL/LIGHT_CHANNEL) of the interpolated pixel at
   floating-point coordinates (fx,fy) in image ImgNum. */
static fxFltType GetHslFlt (FxInfo * pfx, ssize_t ImgNum, const fxFltType fx, const fxFltType fy,
  int channel)
{
  Image * img = pfx->Images[ImgNum];

  double red, green, blue;
  double hue=0, saturation=0, lightness=0;

  MagickBooleanType okay = MagickTrue;
  if(!InterpolatePixelChannel (img, pfx->Imgs[ImgNum].View, RedPixelChannel, img->interpolate,
    (double) fx, (double) fy, &red, pfx->exception)) okay = MagickFalse;
  if(!InterpolatePixelChannel (img, pfx->Imgs[ImgNum].View, GreenPixelChannel, img->interpolate,
    (double) fx, (double) fy, &green, pfx->exception)) okay = MagickFalse;
  if(!InterpolatePixelChannel (img, pfx->Imgs[ImgNum].View, BluePixelChannel, img->interpolate,
    (double) fx, (double) fy, &blue, pfx->exception)) okay = MagickFalse;

  if (!okay)
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "GetHslFlt failure", "%lu %Lg,%Lg %i",
      ImgNum, fx, fy, channel);

  ConvertRGBToHSL (
    red, green, blue,
    &hue, &saturation,
&lightness); if (channel == HUE_CHANNEL) return hue; if (channel == SAT_CHANNEL) return saturation; if (channel == LIGHT_CHANNEL) return lightness; return 0.0; } static fxFltType GetHslInt (FxInfo * pfx, ssize_t ImgNum, const ssize_t imgx, const ssize_t imgy, int channel) { Image * img = pfx->Images[ImgNum]; double hue=0, saturation=0, lightness=0; const Quantum * p = GetCacheViewVirtualPixels (pfx->Imgs[ImgNum].View, imgx, imgy, 1, 1, pfx->exception); if (!p) (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "GetHslInt failure", "%lu %li,%li %i", ImgNum, imgx, imgy, channel); ConvertRGBToHSL ( GetPixelRed (img, p), GetPixelGreen (img, p), GetPixelBlue (img, p), &hue, &saturation, &lightness); if (channel == HUE_CHANNEL) return hue; if (channel == SAT_CHANNEL) return saturation; if (channel == LIGHT_CHANNEL) return lightness; return 0.0; } static fxFltType inline GetIntensity (FxInfo * pfx, ssize_t ImgNum, const fxFltType fx, const fxFltType fy) { Quantum quantum_pixel[MaxPixelChannels]; PixelInfo pixelinf; Image * img = pfx->Images[ImgNum]; (void) GetPixelInfo (img, &pixelinf); if (!InterpolatePixelInfo (img, pfx->Imgs[pfx->ImgNum].View, img->interpolate, (double) fx, (double) fy, &pixelinf, pfx->exception)) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "GetIntensity failure", "%lu %Lg,%Lg", ImgNum, fx, fy); } SetPixelViaPixelInfo (img, &pixelinf, quantum_pixel); return QuantumScale * GetPixelIntensity (img, quantum_pixel); } static MagickBooleanType ExecuteRPN (FxInfo * pfx, fxRtT * pfxrt, fxFltType *result, const PixelChannel channel, const ssize_t imgx, const ssize_t imgy) { const Quantum * p = pfxrt->thisPixel; fxFltType regA=0, regB=0, regC=0, regD=0, regE=0; Image * img = pfx->image; ChannelStatistics * cs = NULL; MagickBooleanType NeedRelinq = MagickFalse; double hue=0, saturation=0, lightness=0; int i; /* For -fx, this sets p to ImgNum 0. for %[fx:...], this sets p to the currrent image. 
Similarly img. */ if (!p) p = GetCacheViewVirtualPixels ( pfx->Imgs[pfx->ImgNum].View, imgx, imgy, 1, 1, pfx->exception); if (pfx->GotStats) { cs = pfx->statistics[pfx->ImgNum]; } else if (pfx->NeedStats) { cs = CollectOneImgStats (pfx, pfx->Images[pfx->ImgNum]); NeedRelinq = MagickTrue; } /* Folllowing is only for expressions like "saturation", with no image specifier. */ if (pfx->NeedHsl) { ConvertRGBToHSL ( GetPixelRed (img, p), GetPixelGreen (img, p), GetPixelBlue (img, p), &hue, &saturation, &lightness); } for (i=0; i < pfx->usedElements; i++) { ElementT *pel = &pfx->Elements[i]; switch (pel->nArgs) { case 0: break; case 1: regA = PopVal (pfx, pfxrt, i); break; case 2: regB = PopVal (pfx, pfxrt, i); regA = PopVal (pfx, pfxrt, i); break; case 3: regC = PopVal (pfx, pfxrt, i); regB = PopVal (pfx, pfxrt, i); regA = PopVal (pfx, pfxrt, i); break; case 4: regD = PopVal (pfx, pfxrt, i); regC = PopVal (pfx, pfxrt, i); regB = PopVal (pfx, pfxrt, i); regA = PopVal (pfx, pfxrt, i); break; case 5: regE = PopVal (pfx, pfxrt, i); regD = PopVal (pfx, pfxrt, i); regC = PopVal (pfx, pfxrt, i); regB = PopVal (pfx, pfxrt, i); regA = PopVal (pfx, pfxrt, i); break; default: (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Too many args:", "%i", pel->nArgs); break; } switch (pel->oprNum) { case oAddEq: regA = (pfxrt->UserSymVals[pel->EleNdx] += regA); break; case oSubtractEq: regA = (pfxrt->UserSymVals[pel->EleNdx] -= regA); break; case oMultiplyEq: regA = (pfxrt->UserSymVals[pel->EleNdx] *= regA); break; case oDivideEq: regA = (pfxrt->UserSymVals[pel->EleNdx] *= PerceptibleReciprocal((double)regA)); break; case oPlusPlus: regA = pfxrt->UserSymVals[pel->EleNdx]++; break; case oSubSub: regA = pfxrt->UserSymVals[pel->EleNdx]--; break; case oAdd: regA += regB; break; case oSubtract: regA -= regB; break; case oMultiply: regA *= regB; break; case oDivide: regA *= PerceptibleReciprocal((double)regB); break; case oModulus: regA = fmod ((double) regA, 
fabs(floor((double) regB+0.5))); break; case oUnaryPlus: /* Do nothing. */ break; case oUnaryMinus: regA = -regA; break; case oLshift: if ((size_t) (regB+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "undefined shift", "%g", (double) regB); regA = (fxFltType) 0.0; break; } regA = (fxFltType) ((size_t)(regA+0.5) << (size_t)(regB+0.5)); break; case oRshift: if ((size_t) (regB+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "undefined shift", "%g", (double) regB); regA = (fxFltType) 0.0; break; } regA = (fxFltType) ((size_t)(regA+0.5) >> (size_t)(regB+0.5)); break; case oEq: regA = fabs((double) (regA-regB)) < MagickEpsilon ? 1.0 : 0.0; break; case oNotEq: regA = fabs((double) (regA-regB)) >= MagickEpsilon ? 1.0 : 0.0; break; case oLtEq: regA = (regA <= regB) ? 1.0 : 0.0; break; case oGtEq: regA = (regA >= regB) ? 1.0 : 0.0; break; case oLt: regA = (regA < regB) ? 1.0 : 0.0; break; case oGt: regA = (regA > regB) ? 1.0 : 0.0; break; case oLogAnd: regA = (regA<=0) ? 0.0 : (regB > 0) ? 1.0 : 0.0; break; case oLogOr: regA = (regA>0) ? 1.0 : (regB > 0.0) ? 1.0 : 0.0; break; case oLogNot: regA = (regA==0) ? 1.0 : 0.0; break; case oBitAnd: regA = (fxFltType) ((size_t)(regA+0.5) & (size_t)(regB+0.5)); break; case oBitOr: regA = (fxFltType) ((size_t)(regA+0.5) | (size_t)(regB+0.5)); break; case oBitNot: /* Old fx doesn't add 0.5. 
*/ regA = (fxFltType) (~(size_t)(regA+0.5)); break; case oPow: regA = pow ((double) regA, (double) regB); break; case oQuery: case oColon: break; case oOpenParen: case oCloseParen: case oOpenBracket: case oCloseBracket: case oOpenBrace: case oCloseBrace: break; case oAssign: pel->val = regA; break; case oNull: { if (pel->type == etColourConstant) { switch (channel) { default: case 0: regA = pel->val; break; case 1: regA = pel->val1; break; case 2: regA = pel->val2; break; } } else { regA = pel->val; } break; } case fAbs: regA = fabs ((double) regA); break; #if defined(MAGICKCORE_HAVE_ACOSH) case fAcosh: regA = acosh ((double) regA); break; #endif case fAcos: regA = acos ((double) regA); break; #if defined(MAGICKCORE_HAVE_J1) case fAiry: if (regA==0) regA = 1.0; else { fxFltType gamma = 2.0 * j1 ((MagickPI*regA)) / (MagickPI*regA); regA = gamma * gamma; } break; #endif case fAlt: regA = (fxFltType) (((ssize_t) regA) & 0x01 ? -1.0 : 1.0); break; #if defined(MAGICKCORE_HAVE_ASINH) case fAsinh: regA = asinh ((double) regA); break; #endif case fAsin: regA = asin ((double) regA); break; #if defined(MAGICKCORE_HAVE_ATANH) case fAtanh: regA = atanh ((double) regA); break; #endif case fAtan2: regA = atan2 ((double) regA, (double) regB); break; case fAtan: regA = atan ((double) regA); break; case fCeil: regA = ceil ((double) regA); break; case fChannel: switch (channel) { case 0: break; case 1: regA = regB; break; case 2: regA = regC; break; case 3: regA = regD; break; case 4: regA = regE; break; default: regA = 0.0; } break; case fClamp: if (regA < 0) regA = 0.0; else if (regA > 1.0) regA = 1.0; break; case fCosh: regA = cosh ((double) regA); break; case fCos: regA = cos ((double) regA); break; case fDebug: /* FIXME: debug() should give channel name. 
*/ (void) fprintf (stderr, "%s[%g,%g].[%i]: %s=%.*Lg\n", img->filename, (double) imgx, (double) imgy, channel, SetPtrShortExp (pfx, pel->pExpStart, (size_t) (pel->lenExp+1)), pfx->precision, regA); break; case fDrc: regA = regA / (regB*(regA-1.0) + 1.0); break; #if defined(MAGICKCORE_HAVE_ERF) case fErf: regA = erf ((double) regA); break; #endif case fExp: regA = exp ((double) regA); break; case fFloor: regA = floor ((double) regA); break; case fGauss: regA = exp((double) (-regA*regA/2.0))/sqrt(2.0*MagickPI); break; case fGcd: if (!IsNaN(regA)) regA = FxGcd (regA, regB, 0); break; case fHypot: regA = hypot ((double) regA, (double) regB); break; case fInt: regA = floor ((double) regA); break; case fIsnan: regA = (fxFltType) (!!IsNaN (regA)); break; #if defined(MAGICKCORE_HAVE_J0) case fJ0: regA = j0 ((double) regA); break; #endif #if defined(MAGICKCORE_HAVE_J1) case fJ1: regA = j1 ((double) regA); break; #endif #if defined(MAGICKCORE_HAVE_J1) case fJinc: if (regA==0) regA = 1.0; else regA = 2.0 * j1 ((MagickPI*regA))/(MagickPI*regA); break; #endif case fLn: regA = log ((double) regA); break; case fLogtwo: regA = log10((double) regA) / log10(2.0); break; case fLog: regA = log10 ((double) regA); break; case fMax: regA = (regA > regB) ? regA : regB; break; case fMin: regA = (regA < regB) ? regA : regB; break; case fMod: regA = regA - floor((double) (regA*PerceptibleReciprocal((double) regB)))*regB; break; case fNot: regA = (fxFltType) (regA < MagickEpsilon); break; case fPow: regA = pow ((double) regA, (double) regB); break; case fRand: { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ExecuteRPN) #endif regA = GetPseudoRandomValue (pfxrt->random_info); break; } case fRound: regA = floor ((double) regA + 0.5); break; case fSign: regA = (regA < 0) ? 
-1.0 : 1.0; break; case fSinc: regA = sin ((double) (MagickPI*regA)) / (MagickPI*regA); break; case fSinh: regA = sinh ((double) regA); break; case fSin: regA = sin ((double) regA); break; case fSqrt: regA = sqrt ((double) regA); break; case fSquish: regA = 1.0 / (1.0 + exp ((double) -regA)); break; case fTanh: regA = tanh ((double) regA); break; case fTan: regA = tan ((double) regA); break; case fTrunc: if (regA >= 0) regA = floor ((double) regA); else regA = ceil ((double) regA); break; case fDo: case fFor: case fIf: case fWhile: break; case fU: { /* Note: 1 value is available, index into image list. May have ImgAttr qualifier or channel qualifier or both. */ ssize_t ImgNum = ChkImgNum (pfx, regA); if (ImgNum < 0) break; regA = (fxFltType) 0; if (ImgNum == 0) { Image * pimg = pfx->Images[0]; int pech = (int)pel->ChannelQual; if (pel->ImgAttrQual == aNull) { if (pech < 0) { if (pech == NO_CHAN_QUAL || pech == THIS_CHANNEL) { if (pfx->ImgNum==0) { regA = QuantumScale * p[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset]; } else { const Quantum * pv = GetCacheViewVirtualPixels ( pfx->Imgs[0].View, imgx, imgy, 1,1, pfx->exception); if (!pv) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "fU can't get cache", "%lu", ImgNum); break; } regA = QuantumScale * pv[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset]; } } else if (pech == HUE_CHANNEL || pech == SAT_CHANNEL || pech == LIGHT_CHANNEL) { regA = GetHslInt (pfx, ImgNum, imgx, imgy, pech); break; } else if (pech == INTENSITY_CHANNEL) { regA = GetIntensity (pfx, 0, (double) imgx, (double) imgy); break; } } else { if (pfx->ImgNum==0) { regA = QuantumScale * p[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset]; } else { const Quantum * pv = GetCacheViewVirtualPixels ( pfx->Imgs[0].View, imgx, imgy, 1,1, pfx->exception); if (!pv) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "fU can't get cache", "%lu", ImgNum); break; } regA = QuantumScale * 
pv[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset]; } } } else { /* we have an image atttribute */ regA = ImageStat (pfx, 0, WHICH_ATTR_CHAN, pel->ImgAttrQual); } } else { /* We have non-zero ImgNum. */ if (pel->ImgAttrQual == aNull) { const Quantum * pv; if ((int)pel->ChannelQual < 0) { if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL || pel->ChannelQual == LIGHT_CHANNEL) { regA = GetHslInt (pfx, ImgNum, imgx, imgy, pel->ChannelQual); break; } else if (pel->ChannelQual == INTENSITY_CHANNEL) { regA = GetIntensity (pfx, ImgNum, (fxFltType) imgx, (fxFltType) imgy); break; } } pv = GetCacheViewVirtualPixels ( pfx->Imgs[ImgNum].View, imgx, imgy, 1,1, pfx->exception); if (!pv) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "fU can't get cache", "%lu", ImgNum); break; } regA = QuantumScale * pv[pfx->Images[ImgNum]->channel_map[WHICH_NON_ATTR_CHAN].offset]; } else { regA = ImageStat (pfx, ImgNum, WHICH_ATTR_CHAN, pel->ImgAttrQual); } } break; } case fU0: { /* No args. No image attribute. We may have a ChannelQual. If called from %[fx:...], ChannelQual will be CompositePixelChannel. 
*/ Image * pimg = pfx->Images[0]; int pech = (int)pel->ChannelQual; if (pech < 0) { if (pech == NO_CHAN_QUAL || pech == THIS_CHANNEL) { if (pfx->ImgNum==0) { regA = QuantumScale * p[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset]; } else { const Quantum * pv = GetCacheViewVirtualPixels ( pfx->Imgs[0].View, imgx, imgy, 1,1, pfx->exception); if (!pv) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "fU0 can't get cache", "%i", 0); break; } regA = QuantumScale * pv[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset]; } } else if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL || pel->ChannelQual == LIGHT_CHANNEL) { regA = GetHslInt (pfx, 0, imgx, imgy, pel->ChannelQual); break; } else if (pel->ChannelQual == INTENSITY_CHANNEL) { regA = GetIntensity (pfx, 0, (fxFltType) imgx, (fxFltType) imgy); } } else { if (pfx->ImgNum==0) { regA = QuantumScale * p[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset]; } else { const Quantum * pv = GetCacheViewVirtualPixels ( pfx->Imgs[0].View, imgx, imgy, 1,1, pfx->exception); if (!pv) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "fU0 can't get cache", "%i", 0); break; } regA = QuantumScale * pv[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset]; } } break; } case fUP: { /* 3 args are: ImgNum, x, y */ ssize_t ImgNum = ChkImgNum (pfx, regA); fxFltType fx, fy; if (ImgNum < 0) break; if (pel->IsRelative) { fx = imgx + regB; fy = imgy + regC; } else { fx = regB; fy = regC; } if ((int)pel->ChannelQual < 0) { if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL || pel->ChannelQual == LIGHT_CHANNEL) { regA = GetHslFlt (pfx, ImgNum, fx, fy, pel->ChannelQual); break; } else if (pel->ChannelQual == INTENSITY_CHANNEL) { regA = GetIntensity (pfx, ImgNum, fx, fy); break; } } { double v; Image * imUP = pfx->Images[ImgNum]; if (! 
InterpolatePixelChannel (imUP, pfx->Imgs[ImgNum].View, WHICH_NON_ATTR_CHAN, imUP->interpolate, (double) fx, (double) fy, &v, pfx->exception)) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "fUP can't get interpolate", "%lu", ImgNum); break; } regA = v * QuantumScale; } break; } case fS: case fV: { /* No args. */ ssize_t ImgNum = 1; if (pel->oprNum == fS) ImgNum = pfx->ImgNum; if (pel->ImgAttrQual == aNull) { const Quantum * pv = GetCacheViewVirtualPixels ( pfx->Imgs[ImgNum].View, imgx, imgy, 1,1, pfx->exception); if (!pv) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "fV can't get cache", "%lu", ImgNum); break; } if ((int)pel->ChannelQual < 0) { if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL || pel->ChannelQual == LIGHT_CHANNEL) { regA = GetHslInt (pfx, ImgNum, imgx, imgy, pel->ChannelQual); break; } else if (pel->ChannelQual == INTENSITY_CHANNEL) { regA = GetIntensity (pfx, ImgNum, (double) imgx, (double) imgy); break; } } regA = QuantumScale * pv[pfx->Images[ImgNum]->channel_map[WHICH_NON_ATTR_CHAN].offset]; } else { regA = ImageStat (pfx, ImgNum, WHICH_ATTR_CHAN, pel->ImgAttrQual); } break; } case fP: case fSP: case fVP: { /* 2 args are: x, y */ fxFltType fx, fy; ssize_t ImgNum = pfx->ImgNum; if (pel->oprNum == fVP) ImgNum = 1; if (pel->IsRelative) { fx = imgx + regA; fy = imgy + regB; } else { fx = regA; fy = regB; } if ((int)pel->ChannelQual < 0) { if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL || pel->ChannelQual == LIGHT_CHANNEL) { regA = GetHslFlt (pfx, ImgNum, fx, fy, pel->ChannelQual); break; } else if (pel->ChannelQual == INTENSITY_CHANNEL) { regA = GetIntensity (pfx, ImgNum, fx, fy); } } { double v; if (! 
InterpolatePixelChannel (pfx->Images[ImgNum], pfx->Imgs[ImgNum].View, WHICH_NON_ATTR_CHAN, pfx->Images[ImgNum]->interpolate, (double) fx, (double) fy, &v, pfx->exception) ) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "fSP or fVP can't get interp", "%lu", ImgNum); break; } regA = v * (fxFltType)QuantumScale; } break; } case fNull: break; case aDepth: regA = (fxFltType) GetImageDepth (img, pfx->exception); break; case aExtent: regA = (fxFltType) img->extent; break; case aKurtosis: regA = cs[WHICH_ATTR_CHAN].kurtosis; break; case aMaxima: regA = cs[WHICH_ATTR_CHAN].maxima; break; case aMean: regA = cs[WHICH_ATTR_CHAN].mean; break; case aMedian: regA = cs[WHICH_ATTR_CHAN].median; break; case aMinima: regA = cs[WHICH_ATTR_CHAN].minima; break; case aPage: break; case aPageX: regA = (fxFltType) img->page.x; break; case aPageY: regA = (fxFltType) img->page.y; break; case aPageWid: regA = (fxFltType) img->page.width; break; case aPageHt: regA = (fxFltType) img->page.height; break; case aPrintsize: break; case aPrintsizeX: regA = (fxFltType) PerceptibleReciprocal (img->resolution.x) * img->columns; break; case aPrintsizeY: regA = (fxFltType) PerceptibleReciprocal (img->resolution.y) * img->rows; break; case aQuality: regA = (fxFltType) img->quality; break; case aRes: break; case aResX: regA = (fxFltType) img->resolution.x; break; case aResY: regA = (fxFltType) img->resolution.y; break; case aSkewness: regA = cs[WHICH_ATTR_CHAN].skewness; break; case aStdDev: regA = cs[WHICH_ATTR_CHAN].standard_deviation; break; case aH: /* image->rows */ regA = (fxFltType) img->rows; break; case aN: /* image list length */ regA = (fxFltType) pfx->ImgListLen; break; case aT: /* image index in list */ regA = (fxFltType) pfx->ImgNum; break; case aW: /* image->columns */ regA = (fxFltType) img->columns; break; case aZ: /* image depth */ regA = (fxFltType) GetImageDepth (img, pfx->exception); break; case aNull: break; case sHue: /* of conversion to HSL */ regA 
= hue; break; case sIntensity: regA = GetIntensity (pfx, pfx->ImgNum, (double) imgx, (double) imgy); break; case sLightness: /* of conversion to HSL */ regA = lightness; break; case sLuma: /* calculation */ case sLuminance: /* as Luma */ regA = QuantumScale * (0.212656 * GetPixelRed (img,p) + 0.715158 * GetPixelGreen (img,p) + 0.072186 * GetPixelBlue (img,p)); break; case sSaturation: /* from conversion to HSL */ regA = saturation; break; case sA: /* alpha */ regA = QuantumScale * GetPixelAlpha (img, p); break; case sB: /* blue */ regA = QuantumScale * GetPixelBlue (img, p); break; case sC: /* red (ie cyan) */ regA = QuantumScale * GetPixelCyan (img, p); break; case sG: /* green */ regA = QuantumScale * GetPixelGreen (img, p); break; case sI: /* current x-coordinate */ regA = (fxFltType) imgx; break; case sJ: /* current y-coordinate */ regA = (fxFltType) imgy; break; case sK: /* black of CMYK */ regA = QuantumScale * GetPixelBlack (img, p); break; case sM: /* green (ie magenta) */ regA = QuantumScale * GetPixelGreen (img, p); break; case sO: /* alpha */ regA = QuantumScale * GetPixelAlpha (img, p); break; case sR: regA = QuantumScale * GetPixelRed (img, p); break; case sY: regA = QuantumScale * GetPixelYellow (img, p); break; case sNull: break; case rGoto: assert (pel->EleNdx >= 0); i = pel->EleNdx-1; /* -1 because 'for' loop will increment. 
*/ break; case rIfZeroGoto: assert (pel->EleNdx >= 0); if (fabs((double) regA) < MagickEpsilon) i = pel->EleNdx-1; break; case rIfNotZeroGoto: assert (pel->EleNdx >= 0); if (fabs((double) regA) > MagickEpsilon) i = pel->EleNdx-1; break; case rCopyFrom: assert (pel->EleNdx >= 0); regA = pfxrt->UserSymVals[pel->EleNdx]; break; case rCopyTo: assert (pel->EleNdx >= 0); pfxrt->UserSymVals[pel->EleNdx] = regA; break; case rZerStk: pfxrt->usedValStack = 0; break; case rNull: break; default: (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "pel->oprNum", "%i '%s' not yet implemented", (int)pel->oprNum, OprStr(pel->oprNum)); break; } if (i < 0) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Bad run-time address", "%i", i); } if (pel->DoPush) if (!PushVal (pfx, pfxrt, regA, i)) break; } if (pfxrt->usedValStack > 0) regA = PopVal (pfx, pfxrt, 9999); *result = regA; if (NeedRelinq) cs = (ChannelStatistics *)RelinquishMagickMemory (cs); if (pfx->exception->severity != UndefinedException) { return MagickFalse; } if (pfxrt->usedValStack != 0) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "ValStack not empty", "(%i)", pfxrt->usedValStack); return MagickFalse; } return MagickTrue; } /* Following is substitute for FxEvaluateChannelExpression(). 
*/ MagickPrivate MagickBooleanType FxEvaluateChannelExpression ( FxInfo *pfx, const PixelChannel channel, const ssize_t x, const ssize_t y, double *result, ExceptionInfo *exception) { const int id = GetOpenMPThreadId(); fxFltType ret; assert (pfx != NULL); assert (pfx->image != NULL); assert (pfx->Images != NULL); assert (pfx->Imgs != NULL); assert (pfx->fxrts != NULL); pfx->fxrts[id].thisPixel = NULL; if (!ExecuteRPN (pfx, &pfx->fxrts[id], &ret, channel, x, y)) { (void) ThrowMagickException ( exception, GetMagickModule(), OptionError, "ExcuteRPN failed", " "); return MagickFalse; } *result = (double) ret; return MagickTrue; } static FxInfo *AcquireFxInfoPrivate (const Image * images, const char * expression, MagickBooleanType CalcAllStats, ExceptionInfo *exception) { char chLimit; FxInfo * pfx = (FxInfo*) AcquireCriticalMemory (sizeof (*pfx)); memset (pfx, 0, sizeof (*pfx)); if (!InitFx (pfx, images, CalcAllStats, exception)) { pfx = (FxInfo*) RelinquishMagickMemory(pfx); return NULL; } if (!BuildRPN (pfx)) { (void) DeInitFx (pfx); pfx = (FxInfo*) RelinquishMagickMemory(pfx); return NULL; } if (*expression == '@') pfx->expression = FileToString (expression+1, ~0UL, exception); else pfx->expression = ConstantString (expression); pfx->pex = (char *)pfx->expression; pfx->teDepth = 0; if (!TranslateStatementList (pfx, ";", &chLimit)) { (void) DestroyRPN (pfx); pfx->expression = DestroyString (pfx->expression); pfx->pex = NULL; (void) DeInitFx (pfx); pfx = (FxInfo*) RelinquishMagickMemory(pfx); return NULL; } if (pfx->teDepth) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Translate expression depth", "(%i) not 0", pfx->teDepth); (void) DestroyRPN (pfx); pfx->expression = DestroyString (pfx->expression); pfx->pex = NULL; (void) DeInitFx (pfx); pfx = (FxInfo*) RelinquishMagickMemory(pfx); return NULL; } if (chLimit != '\0' && chLimit != ';') { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "AcquireFxInfo: 
TranslateExpression did not exhaust input", "(chLimit=%i) at'%s'", (int)chLimit, pfx->pex); (void) DestroyRPN (pfx); pfx->expression = DestroyString (pfx->expression); pfx->pex = NULL; (void) DeInitFx (pfx); pfx = (FxInfo*) RelinquishMagickMemory(pfx); return NULL; } if (pfx->NeedStats && pfx->runType == rtEntireImage && !pfx->statistics) { if (!CollectStatistics (pfx)) { (void) DestroyRPN (pfx); pfx->expression = DestroyString (pfx->expression); pfx->pex = NULL; (void) DeInitFx (pfx); pfx = (FxInfo*) RelinquishMagickMemory(pfx); return NULL; } } if (pfx->DebugOpt) { DumpTables (stderr); DumpUserSymbols (pfx, stderr); (void) DumpRPN (pfx, stderr); } { size_t number_threads=(size_t) GetMagickResourceLimit(ThreadResource); ssize_t t; pfx->fxrts = (fxRtT *)AcquireQuantumMemory (number_threads, sizeof(fxRtT)); if (!pfx->fxrts) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), ResourceLimitFatalError, "fxrts", "%lu", number_threads); (void) DestroyRPN (pfx); pfx->expression = DestroyString (pfx->expression); pfx->pex = NULL; (void) DeInitFx (pfx); pfx = (FxInfo*) RelinquishMagickMemory(pfx); return NULL; } for (t=0; t < (ssize_t) number_threads; t++) { if (!AllocFxRt (pfx, &pfx->fxrts[t])) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), ResourceLimitFatalError, "AllocFxRt t=", "%g", (double) t); { ssize_t t2; for (t2 = t-1; t2 >= 0; t2--) { DestroyFxRt (&pfx->fxrts[t]); } } pfx->fxrts = (fxRtT *) RelinquishMagickMemory (pfx->fxrts); (void) DestroyRPN (pfx); pfx->expression = DestroyString (pfx->expression); pfx->pex = NULL; (void) DeInitFx (pfx); pfx = (FxInfo*) RelinquishMagickMemory(pfx); return NULL; } } } return pfx; } FxInfo *AcquireFxInfo (const Image * images, const char * expression, ExceptionInfo *exception) { return AcquireFxInfoPrivate (images, expression, MagickFalse, exception); } FxInfo *DestroyFxInfo (FxInfo * pfx) { ssize_t t; assert (pfx != NULL); assert (pfx->image != NULL); assert (pfx->Images != NULL); assert 
(pfx->Imgs != NULL); assert (pfx->fxrts != NULL); for (t=0; t < (ssize_t) GetMagickResourceLimit(ThreadResource); t++) { DestroyFxRt (&pfx->fxrts[t]); } pfx->fxrts = (fxRtT *) RelinquishMagickMemory (pfx->fxrts); DestroyRPN (pfx); pfx->expression = DestroyString (pfx->expression); pfx->pex = NULL; (void) DeInitFx (pfx); pfx = (FxInfo*) RelinquishMagickMemory(pfx); return NULL; } /* Following is substitute for FxImage(). */ MagickExport Image *FxImage (const Image *image, const char *expression, ExceptionInfo *exception) { #define FxImageTag "FxNew/Image" CacheView *fx_view, *image_view; Image *fx_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; FxInfo *pfx; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (expression == (const char *) NULL) return(CloneImage(image,0,0,MagickTrue,exception)); fx_image=CloneImage(image,0,0,MagickTrue,exception); if (!fx_image) return NULL; if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse) { fx_image=DestroyImage(fx_image); return NULL; } pfx = AcquireFxInfoPrivate (image, expression, MagickTrue, exception); if (!pfx) { fx_image=DestroyImage(fx_image); return NULL; } assert (pfx->image != NULL); assert (pfx->Images != NULL); assert (pfx->Imgs != NULL); assert (pfx->fxrts != NULL); status=MagickTrue; progress=0; image_view = AcquireVirtualCacheView (image, pfx->exception); fx_view = AcquireAuthenticCacheView (fx_image, pfx->exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(progress,status) \ magick_number_threads(image,fx_image,fx_image->rows, \ pfx->ContainsDebug ? 
0 : 1) #endif for (y=0; y < (ssize_t) fx_image->rows; y++) { const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; fxFltType result = 0.0; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels (image_view, 0, y, image->columns, 1, pfx->exception); q = QueueCacheViewAuthenticPixels (fx_view, 0, y, fx_image->columns, 1, pfx->exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) fx_image->columns; x++) { ssize_t i; pfx->fxrts[id].thisPixel = (Quantum *)p; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel (image, i); PixelTrait traits = GetPixelChannelTraits (image, channel); PixelTrait fx_traits = GetPixelChannelTraits (fx_image, channel); if ((traits == UndefinedPixelTrait) || (fx_traits == UndefinedPixelTrait)) continue; if ((fx_traits & CopyPixelTrait) != 0) { SetPixelChannel (fx_image, channel, p[i], q); continue; } if (!ExecuteRPN (pfx, &pfx->fxrts[id], &result, channel, x, y)) { status=MagickFalse; break; } q[i] = ClampToQuantum ((MagickRealType) (QuantumRange*result)); } p+=GetPixelChannels (image); q+=GetPixelChannels (fx_image); } if (SyncCacheViewAuthenticPixels(fx_view, pfx->exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed = SetImageProgress (image, FxImageTag, progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } fx_view = DestroyCacheView (fx_view); image_view = DestroyCacheView (image_view); /* Before destroying the user symbol values, dump them to stderr. 
*/ if (pfx->DebugOpt && pfx->usedUserSymbols) { int t, i; char UserSym[MagickPathExtent]; fprintf (stderr, "User symbols (%i):\n", pfx->usedUserSymbols); for (t=0; t < (int) GetMagickResourceLimit(ThreadResource); t++) { for (i = 0; i < (int) pfx->usedUserSymbols; i++) { fprintf (stderr, "th=%i us=%i '%s': %.*Lg\n", t, i, NameOfUserSym (pfx, i, UserSym), pfx->precision, pfx->fxrts[t].UserSymVals[i]); } } } if (pfx->exception->severity != UndefinedException) { status = MagickFalse; } if (status == MagickFalse) fx_image = DestroyImage (fx_image); pfx = DestroyFxInfo (pfx); return(fx_image); }
GB_unop__log10_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__log10_fp32_fp32)
// op(A') function: GB (_unop_tran__log10_fp32_fp32)

// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = log10f (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = log10f (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ; \
    Cx [pC] = log10f (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LOG10 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies z = log10f (aij) entrywise.  Handles both the full/sparse case
// (Ab == NULL: all anz entries present) and the bitmap case (Ab [p] marks
// which entries exist).  Work is split across nthreads OpenMP threads.
GrB_Info GB (_unop_apply__log10_fp32_fp32)
(
    float *Cx,                      // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,      // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // A is full or sparse: every position 0..anz-1 holds an entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a straight memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = log10f (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip positions with no entry
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = log10f (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is shared by all unary ops and is textually
// included from GB_unop_transpose.c, specialized via the GB_* macros above.
GrB_Info GB (_unop_tran__log10_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
openmp_critical2.c
///TAFFO_TEST_ARGS -Xvra -propagate-all -fopenmp #include <stdio.h> #define MAX_N (100) int main(int argc, char *argv[]) { float array[MAX_N] __attribute__((annotate("scalar(range(0,100))"))); int i = 0; #pragma omp parallel for for (i = 0; i < MAX_N; i++) { array[i] = i * 1.0; } float result __attribute__((annotate("scalar(range(0,5000))"))) = 0; #pragma omp parallel for for (i = 0; i < MAX_N; i++) { #pragma omp critical result += array[i]; } printf("result: %f\n", result); }
Pi.c
#include <stdio.h> #include <omp.h> static long num_steps = 100000; double step; #define NUM_THREADS 4 // follow the SPMD mode int main(int argc, char const *argv[]) { int i; double pi, sum[NUM_THREADS], start_time, end_time; step = 1.0 / (double)num_steps; omp_set_num_threads(NUM_THREADS); start_time = omp_get_wtime(); #pragma omp parallel { int id; double x; id = omp_get_thread_num(); for (i = id, sum[id] = 0.0; i < num_steps; i = i + NUM_THREADS) { x = (i - 0.5) * step; sum[id] += 4.0 / (1.0 + x * x); } } // pi = step * sum; for (i = 0, pi = 0.0; i < NUM_THREADS; i++) { pi += sum[i] * step; } end_time = omp_get_wtime(); printf("Estimated Pi:%f\nRunning time: %f \n", pi, end_time - start_time); return 0; }
sapG_fmt_plug.c
/*
 * this is a SAP PASSCODE (CODEVN G) plugin for john the ripper.
 * tested on linux/x86 only, rest is up to you.. at least, someone did the reversing :-)
 *
 * please note: this code is in a "works for me"-state, feel free to modify/speed up/clean/whatever it...
 *
 * (c) x7d8 sap loverz, public domain, btw
 * cheers: see test-cases.
 *
 * Heavily modified by magnum 2011-2012 for performance and for SIMD, OMP and
 * encodings support. Copyright (c) 2011, 2012 magnum, and it is hereby released
 * to the general public under the following terms: Redistribution and use in
 * source and binary forms, with or without modification, are permitted.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_sapG;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sapG);
#else

#include <string.h>
#include <ctype.h>

#include "arch.h"

#ifdef MMX_COEF
#define NBKEYS (MMX_COEF * SHA1_SSE_PARA)
#endif
#include "sse-intrinsics.h"

#include "misc.h"
#include "common.h"
#include "formats.h"
#include "sha.h"
#include "options.h"
#include "unicode.h"
#include "johnswap.h"

#define FORMAT_LABEL "sapg"
#define FORMAT_NAME "SAP CODVN F/G (PASSCODE)"

#define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME

static unsigned int omp_t = 1;
#if defined(_OPENMP)
#include <omp.h>
#ifdef MMX_COEF
#define OMP_SCALE 128
#else
#define OMP_SCALE 2048
#endif
#endif
#include "memdbg.h"

#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0

#define SALT_FIELD_LENGTH 40
#define USER_NAME_LENGTH 12 /* max. length of user name in characters */
#define SALT_LENGTH (USER_NAME_LENGTH*3) /* 12 characters of UTF-8 */
#define PLAINTEXT_LENGTH 40 /* Characters of UTF-8 */
#define UTF8_PLAINTEXT_LENGTH (PLAINTEXT_LENGTH*3) /* worst case */

#define BINARY_SIZE 20
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct saltstruct)
#define SALT_ALIGN 4
#define CIPHERTEXT_LENGTH (SALT_LENGTH + 1 + 2*BINARY_SIZE) /* SALT + $ + 2x20 bytes for SHA1-representation */

#ifdef MMX_COEF
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
// The GET*POS macros compute byte offsets into the interleaved SIMD key/output
// buffers (MMX_COEF lanes side by side, big-endian words within each lane).
#define GETPOS(i, index) ( (index&(MMX_COEF-1))*4 + ((i)&60)*MMX_COEF + (3-((i)&3)) + (index>>(MMX_COEF>>1))*SHA_BUF_SIZ*MMX_COEF*4 ) //for endianity conversion
#define GETWORDPOS(i, index) ( (index&(MMX_COEF-1))*4 + ((i)&60)*MMX_COEF + (index>>(MMX_COEF>>1))*SHA_BUF_SIZ*MMX_COEF*4 )
#define GETSTARTPOS(index) ( (index&(MMX_COEF-1))*4 + (index>>(MMX_COEF>>1))*SHA_BUF_SIZ*MMX_COEF*4 )
#define GETOUTPOS(i, index) ( (index&(MMX_COEF-1))*4 + ((i)&(0xffffffff-3))*MMX_COEF + (3-((i)&3)) + (index>>(MMX_COEF>>1))*20*MMX_COEF ) //for endianity conversion
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif

//this array is from disp+work (sap's worker process)
#define MAGIC_ARRAY_SIZE 160
static const unsigned char theMagicArray[MAGIC_ARRAY_SIZE]=
{0x91, 0xAC, 0x51, 0x14, 0x9F, 0x67, 0x54, 0x43, 0x24, 0xE7, 0x3B, 0xE0, 0x28, 0x74, 0x7B, 0xC2,
 0x86, 0x33, 0x13, 0xEB, 0x5A, 0x4F, 0xCB, 0x5C, 0x08, 0x0A, 0x73, 0x37, 0x0E, 0x5D, 0x1C, 0x2F,
 0x33, 0x8F, 0xE6, 0xE5, 0xF8, 0x9B, 0xAE, 0xDD, 0x16, 0xF2, 0x4B, 0x8D, 0x2C, 0xE1, 0xD4, 0xDC,
 0xB0, 0xCB, 0xDF, 0x9D, 0xD4, 0x70, 0x6D, 0x17, 0xF9, 0x4D, 0x42, 0x3F, 0x9B, 0x1B, 0x11, 0x94,
 0x9F, 0x5B, 0xC1, 0x9B, 0x06, 0x05, 0x9D, 0x03, 0x9D, 0x5E, 0x13, 0x8A, 0x1E, 0x9A, 0x6A, 0xE8,
 0xD9, 0x7C, 0x14, 0x17, 0x58, 0xC7, 0x2A, 0xF6, 0xA1, 0x99, 0x63, 0x0A, 0xD7, 0xFD, 0x70, 0xC3,
 0xF6, 0x5E, 0x74, 0x13, 0x03, 0xC9, 0x0B, 0x04, 0x26, 0x98, 0xF7, 0x26, 0x8A, 0x92, 0x93, 0x25,
 0xB0, 0xA2, 0x0D, 0x23, 0xED, 0x63, 0x79, 0x6D, 0x13, 0x32, 0xFA, 0x3C, 0x35, 0x02, 0x9A, 0xA3,
 0xB3, 0xDD, 0x8E, 0x0A, 0x24, 0xBF, 0x51, 0xC3, 0x7C, 0xCD, 0x55, 0x9F, 0x37, 0xAF, 0x94, 0x4C,
 0x29, 0x08, 0x52, 0x82, 0xB2, 0x3B, 0x4E, 0x37, 0x9F, 0x17, 0x07, 0x91, 0x11, 0x3B, 0xFD, 0xCD };

// For backwards compatibility, we must support salts padded with spaces to a field width of 40
static struct fmt_tests tests[] = {
	{"DDIC$6066CD3147915331EC4C602847D27A75EB3E8F0A", "DDIC"},
	// invalid, because password is too short (would work during login, but not during password change),
	// magnum wants to keep these tests anyway, because they help verifying key buffer cleaning:
	{"F $646A0AD270DF651065669A45D171EDD62DFE39A1", "X"},
	{"JOHNNY $7D79B478E70CAAE63C41E0824EAB644B9070D10A", "CYBERPUNK"},
	{"VAN$D15597367F24090F0A501962788E9F19B3604E73", "hauser"},
	{"ROOT$1194E38F14B9F3F8DA1B181F14DEB70E7BDCC239", "KID"},
	// invalid, because password is too short (would work during login, but not during password change):
	{"MAN$22886450D0AB90FDA7F91C4F3DD5619175B372EA", "u"},
#if 0
	// This test case is invalid since the user name can just be
	// up to 12 characters long.
	// So, unless the user name doesn't contain non-ascii characters,
	// it will not be longer than 12 bytes.
	// Also, "-------" is not a valid SAP password, since the first 3 characters
	// are identical.
	{"------------------------------------$463BDDCF2D2D6E07FC64C075A0802BD87A39BBA6", "-------"},
#else
	// SAP user name consisting of 12 consecutive EURO characters:
	{"\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac"
	 "\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac"
	 "$B20D15C088481780CD44FCF2003AAAFBD9710C7C", "--+----"},
#endif
	{"SAP* $60A0F7E06D95BC9FB45F605BDF1F7B660E5D5D4E", "MaStEr"},
	{"DOLLAR$$$---$E0180FD4542D8B6715E7D0D9EDE7E2D2E40C3D4D", "Dollar$$$---"},
	{NULL}
};

// saved_plain holds the raw candidate passwords; keyLen[i] caches the byte
// length of key i (-1 means "not measured yet", see set_key/crypt_all).
static UTF8 (*saved_plain)[UTF8_PLAINTEXT_LENGTH + 1];
static int *keyLen;

#ifdef MMX_COEF

// max intermediate crypt size is 256 bytes
// multiple key buffers for lengths > 55
#define LIMB 5
static unsigned char *saved_key[LIMB];
static unsigned char *crypt_key;
static unsigned char *interm_crypt;
static unsigned int *clean_pos;

#else

static UTF8 (*saved_key)[UTF8_PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE / sizeof(ARCH_WORD_32)];

#endif

// The salt is the SAP user name: l bytes in s (not NUL-terminated).
static struct saltstruct {
	unsigned int l;
	unsigned char s[SALT_LENGTH];
} *cur_salt;

// One-time format setup: configures Unicode handling, scales the key count
// for OpenMP, and allocates all key/crypt buffers.
static void init(struct fmt_main *self)
{
	static int warned = 0;
#ifdef MMX_COEF
	int i;
#endif

	// This is needed in order NOT to upper-case german double-s
	// in UTF-8 mode.
	initUnicode(UNICODE_MS_NEW);

	if (!options.listconf && pers_opts.target_enc != UTF_8 &&
	    !(options.flags & FLG_TEST_CHK) && warned++ == 0)
		fprintf(stderr, "Warning: SAP-F/G format should always be UTF-8.\nConvert your input files to UTF-8 and use --input-encoding=utf8\n");

	// Max 40 characters or 120 bytes of UTF-8, We actually do not truncate
	// multibyte input at 40 characters because it's too expensive.
	if (pers_opts.target_enc == UTF_8)
		self->params.plaintext_length = UTF8_PLAINTEXT_LENGTH;

#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt = omp_t * MIN_KEYS_PER_CRYPT;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt = omp_t * MAX_KEYS_PER_CRYPT;
#endif

	saved_plain = mem_calloc_tiny(sizeof(*saved_plain) * self->params.max_keys_per_crypt, MEM_ALIGN_NONE);
	keyLen = mem_calloc_tiny(sizeof(*keyLen) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
#ifdef MMX_COEF
	clean_pos = mem_calloc_tiny(sizeof(*clean_pos) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	for(i = 0; i < LIMB; i++)
		saved_key[i] = mem_calloc_tiny(SHA_BUF_SIZ*4 * self->params.max_keys_per_crypt, MEM_ALIGN_SIMD);
	interm_crypt = mem_calloc_tiny(20 * self->params.max_keys_per_crypt, MEM_ALIGN_SIMD);
	crypt_key = mem_calloc_tiny(20 * self->params.max_keys_per_crypt, MEM_ALIGN_SIMD);
#else
	crypt_key = mem_calloc_tiny(sizeof(*crypt_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	saved_key = saved_plain;
#endif
}

// Validates one ciphertext line: "<USER>$<40 upper-case hex digits>".
// Rejects lower-case user names, names longer than ~12 characters, names
// starting with '!' or '?', and lower-case hex in the hash part.
static int valid(char *ciphertext, struct fmt_main *self)
{
	int i, j;
	char *p;

	if (!ciphertext) return 0;

	p = strrchr(ciphertext, '$');
	if (!p) return 0;

	if (p - ciphertext > SALT_FIELD_LENGTH) return 0;

	if (strlen(&p[1]) != BINARY_SIZE * 2) return 0;

	j = 0;
	for (i = 0; i < p - ciphertext; i++) {
		// even those lower case non-ascii characters with a
		// corresponding upper case character could be rejected
		if (ciphertext[i] >= 'a' && ciphertext[i] <= 'z') return 0;
		else if (ciphertext[i] & 0x80)
			j++;

		// Reject if user name is longer than 12 characters.
		// This is not accurate, but close enough.
		// To be exact, I'd need to keep j unchanged for
		// the first byte of each character, instead of
		// incrementing j for every byte >= 0x80.
		if (i >= USER_NAME_LENGTH + j && ciphertext[i] != ' ') return 0;
	}
	// SAP user name cannot start with ! or ?
	if (ciphertext[0] == '!' || ciphertext[0] == '?') return 0;

	p++;

	// SAP and sap2john.pl always use upper case A-F for hashes,
	// so don't allow a-f
	for (i = 0; i < BINARY_SIZE * 2; i++)
		if (!(((p[i]>='0' && p[i]<='9')) ||
		      ((p[i]>='A' && p[i]<='F')) ))
			return 0;

	return 1;
}

// Installs a previously extracted salt for the next crypt_all() calls.
static void set_salt(void *salt)
{
	cur_salt = salt;
}

// Extracts the salt (the user name, i.e. everything before the final '$')
// from a canonical ciphertext.  Returns a pointer to static storage.
static void *get_salt(char *ciphertext)
{
	char *p;
	static struct saltstruct out;

	p = strrchr(ciphertext, '$');
	out.l = (int)(p - ciphertext);

	memset(out.s, 0, sizeof(out.s));
	memcpy(out.s, ciphertext, out.l);

	return &out;
}

// Resets all cached key lengths so crypt_all() re-reads the plaintexts.
static void clear_keys(void)
{
	memset(keyLen, 0, sizeof(*keyLen) * omp_t * MAX_KEYS_PER_CRYPT);
}

// Stores one candidate password; -1 marks the length as not yet computed.
static void set_key(char *key, int index)
{
	memcpy((char*)saved_plain[index], key, UTF8_PLAINTEXT_LENGTH);
	keyLen[index] = -1;
}

static char *get_key(int index) {
	return (char*)saved_plain[index];
}

// Quick check: does ANY computed digest match the first word of `binary`?
static int cmp_all(void *binary, int count)
{
#ifdef MMX_COEF
	unsigned int x,y=0;

#ifdef _OPENMP
	for(;y<SHA1_SSE_PARA*omp_t;y++)
#else
	for(;y<SHA1_SSE_PARA;y++)
#endif
		for(x=0;x<MMX_COEF;x++)
		{
			if( ((unsigned int*)binary)[0] == ((unsigned int*)crypt_key)[x+y*MMX_COEF*5] )
				return 1;
		}
	return 0;
#else
	unsigned int index;
	for (index = 0; index < count; index++)
		if (!memcmp(binary, crypt_key[index], BINARY_SIZE))
			return 1;
	return 0;
#endif
}

// Full 20 bytes are already compared in cmp_one(), so nothing left to do.
static int cmp_exact(char *source, int index){
	return 1;
}

// Compares all five 32-bit words of one digest against `binary`.
static int cmp_one(void *binary, int index)
{
#ifdef MMX_COEF
	unsigned int x,y;
	x = index&(MMX_COEF-1);
	y = index>>(MMX_COEF>>1);

	if( (((unsigned int*)binary)[0] != ((unsigned int*)crypt_key)[x+y*MMX_COEF*5]) |
	    (((unsigned int*)binary)[1] != ((unsigned int*)crypt_key)[x+y*MMX_COEF*5+MMX_COEF]) |
	    (((unsigned int*)binary)[2] != ((unsigned int*)crypt_key)[x+y*MMX_COEF*5+2*MMX_COEF]) |
	    (((unsigned int*)binary)[3] != ((unsigned int*)crypt_key)[x+y*MMX_COEF*5+3*MMX_COEF])|
	    (((unsigned int*)binary)[4] != ((unsigned int*)crypt_key)[x+y*MMX_COEF*5+4*MMX_COEF]) )
		return 0;
	return 1;
#else
	return !memcmp(binary, crypt_key[index], BINARY_SIZE);
#endif
}

/*
 * calculate the length of data that has to be hashed from the magic array. pass the first hash result in here.
 * this is part of the walld0rf-magic
 * The return value will always be between 32 and 82, inclusive
 */
#if MMX_COEF
static inline unsigned int extractLengthOfMagicArray(unsigned const char *pbHashArray, unsigned int index)
#else
static inline unsigned int extractLengthOfMagicArray(unsigned const char *pbHashArray)
#endif
{
	unsigned int modSum = 0;
#if MMX_COEF
	// Sum (byte % 6) over the first 10 digest bytes, walking the
	// interleaved SIMD layout lane by lane.
	unsigned const char *p = &pbHashArray[GETOUTPOS(3, index)];

	modSum += *p++ % 6;
	modSum += *p++ % 6;
	modSum += *p++ % 6;
	modSum += *p++ % 6;
	p += 4*(MMX_COEF - 1);
	modSum += *p++ % 6;
	modSum += *p++ % 6;
	modSum += *p++ % 6;
	modSum += *p++ % 6;
	p += 4*(MMX_COEF - 1) + 2;
	modSum += *p++ % 6;
	modSum += *p % 6;
#else
	unsigned int i;

	for (i=0; i<=9; i++)
		modSum += pbHashArray[i] % 6;
#endif
	return modSum + 0x20; //0x20 is hardcoded...
}

/*
 * Calculate the offset into the magic array. pass the first hash result in here
 * part of the walld0rf-magic
 * The return value will always be between 0 and 70, inclusive
 */
#if MMX_COEF
static inline unsigned int extractOffsetToMagicArray(unsigned const char *pbHashArray, unsigned int index)
#else
static inline unsigned int extractOffsetToMagicArray(unsigned const char *pbHashArray)
#endif
{
	unsigned int modSum = 0;
#if MMX_COEF
	// Sum (byte % 8) over digest bytes 10..19; the 0x07 masks implement
	// the modulo, four bytes per 32-bit word in the SIMD layout.
	unsigned const int *p = (unsigned int*)&pbHashArray[GETOUTPOS(11, index)];
	unsigned int temp;

	temp = *p & 0x0707;
	modSum += (temp >> 8) + (unsigned char)temp;
	p += MMX_COEF;
	temp = *p & 0x07070707;
	modSum += (temp >> 24) + (unsigned char)(temp >> 16) +
		(unsigned char)(temp >> 8) + (unsigned char)temp;
	p += MMX_COEF;
	temp = *p & 0x07070707;
	modSum += (temp >> 24) + (unsigned char)(temp >> 16) +
		(unsigned char)(temp >> 8) + (unsigned char)temp;
#else
	unsigned int i;

	for (i = 19; i >= 10; i--)
		modSum += pbHashArray[i] % 8;
#endif
	return modSum;
}

#if MMX_COEF
// Copies one finished 5-word SHA-1 digest for lane `index` from the
// interleaved SIMD buffer `source` into `dest`.
static inline void crypt_done(unsigned const int *source, unsigned int *dest, int index)
{
	unsigned int i;
	unsigned const int *s = &source[(index&(MMX_COEF-1)) + (index>>(MMX_COEF>>1))*5*MMX_COEF];
	unsigned int *d = &dest[(index&(MMX_COEF-1)) + (index>>(MMX_COEF>>1))*5*MMX_COEF];

	for (i = 0; i < 5; i++) {
		*d = *s;
		s += MMX_COEF;
		d += MMX_COEF;
	}
}
#endif

// Computes, for every stored key:
//   h1 = SHA1(password . username)
// then uses h1 to pick a slice of theMagicArray (walld0rf-magic) and computes
//   digest = SHA1(password . magicSlice . username)
// The SIMD path builds multi-limb (up to 5 x 64-byte block) vector buffers
// and harvests each lane's digest as soon as its last limb is hashed.
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
#if MMX_COEF
#if defined(_OPENMP)
	int t;
#pragma omp parallel for
	for (t = 0; t < omp_t; t++)
#define ti (t*NBKEYS+index)
#else
#define t 0
#define ti index
#endif
	{
		unsigned int index, i, longest;
		int len;
		unsigned int crypt_len[NBKEYS];

		longest = 0;

		for (index = 0; index < NBKEYS; index++) {

			// Store key into vector key buffer
			if ((len = keyLen[ti]) < 0) {
				ARCH_WORD_32 *keybuf_word = (ARCH_WORD_32*)&saved_key[0][GETSTARTPOS(ti)];
				const ARCH_WORD_32 *wkey = (ARCH_WORD_32*)saved_plain[ti];
				ARCH_WORD_32 temp;

				len = 0;
				// Copy a 32-bit word at a time, byte-swapping to the
				// big-endian order SHA-1 expects, until the NUL is found.
				while(((unsigned char)(temp = *wkey++))) {
					if (!(temp & 0xff00)) {
						*keybuf_word = JOHNSWAP(temp & 0xff);
						len++;
						break;
					}
					if (!(temp & 0xff0000)) {
						*keybuf_word = JOHNSWAP(temp & 0xffff);
						len+=2;
						break;
					}
					*keybuf_word = JOHNSWAP(temp);
					if (!(temp & 0xff000000)) {
						len+=3;
						break;
					}
					len += 4;
					if (len & 63)
						keybuf_word += MMX_COEF;
					else
						keybuf_word = (ARCH_WORD_32*)&saved_key[len>>6][GETSTARTPOS(ti)];
				}

				// Back-out of trailing spaces
				while(len && saved_plain[ti][len - 1] == ' ')
					saved_plain[ti][--len] = 0;

				keyLen[ti] = len;
			}

			// 1. we need to SHA1 the password and username
			for (i = 0; i < cur_salt->l; i++)
				saved_key[(len+i)>>6][GETPOS((len + i), ti)] = cur_salt->s[i];
			len += i;

			saved_key[len>>6][GETPOS(len, ti)] = 0x80;

			// Clean rest of this buffer
			i = len;
			while (++i & 3)
				saved_key[i>>6][GETPOS(i, ti)] = 0;
			for (; i < (((len+8)>>6)+1)*64; i += 4)
				*(ARCH_WORD_32*)&saved_key[i>>6][GETWORDPOS(i, ti)] = 0;

			// This should do good but Valgrind insists it's a waste
			//if (clean_pos[ti] < i)
			//	clean_pos[ti] = len + 1;

			if (len > longest)
				longest = len;

			// Store the message length (in bits) in the final word of the
			// last SHA-1 block for this lane.
			((unsigned int*)saved_key[(len+8)>>6])[15*MMX_COEF + (ti&3) + (ti>>2)*SHA_BUF_SIZ*MMX_COEF] = len << 3;
			crypt_len[index] = len;
		}

		SSESHA1body(&saved_key[0][t*SHA_BUF_SIZ*4*NBKEYS], (unsigned int*)&crypt_key[t*20*NBKEYS], NULL, SSEi_MIXED_IN);

		// Do another and possibly a third limb
		memcpy(&interm_crypt[t*20*NBKEYS], &crypt_key[t*20*NBKEYS], 20*NBKEYS);
		for (i = 1; i < (((longest + 8) >> 6) + 1); i++) {
			SSESHA1body(&saved_key[i][t*SHA_BUF_SIZ*4*NBKEYS], (unsigned int*)&interm_crypt[t*20*NBKEYS], (unsigned int*)&interm_crypt[t*20*NBKEYS], SSEi_MIXED_IN|SSEi_RELOAD);
			// Copy any output that is done now
			for (index = 0; index < NBKEYS; index++)
				if (((crypt_len[index] + 8) >> 6) == i)
					crypt_done((unsigned int*)interm_crypt, (unsigned int*)crypt_key, ti);
		}

		longest = 0;

		for (index = 0; index < NBKEYS; index++) {
			unsigned int offsetMagicArray;
			unsigned int lengthIntoMagicArray;
			const unsigned char *p;
			int i;

			// If final crypt ends up to be 56-61 bytes (or so), this must be clean
			for (i = 0; i < LIMB; i++)
				if (keyLen[ti] < i * 64 + 55)
					((unsigned int*)saved_key[i])[15*MMX_COEF + (ti&3) + (ti>>2)*SHA_BUF_SIZ*MMX_COEF] = 0;

			len = keyLen[ti];
			lengthIntoMagicArray = extractLengthOfMagicArray(crypt_key, ti);
			offsetMagicArray = extractOffsetToMagicArray(crypt_key, ti);

			// 2. now, hash again --> sha1($password+$partOfMagicArray+$username) --> this is CODVNG passcode...
			i = len - 1;
			p = &theMagicArray[offsetMagicArray];
			// Copy a char at a time until aligned (at destination)...
			while (++i & 3)
				saved_key[i>>6][GETPOS(i, ti)] = *p++;
			// ...then a word at a time. This is a good boost, we are copying between 32 and 82 bytes here.
			for (;i < lengthIntoMagicArray + len; i += 4, p += 4)
				*(ARCH_WORD_32*)&saved_key[i>>6][GETWORDPOS(i, ti)] = JOHNSWAP(*(ARCH_WORD_32*)p);

			// Now, the salt. This is typically too short for the stunt above.
			for (i = 0; i < cur_salt->l; i++)
				saved_key[(len+lengthIntoMagicArray+i)>>6][GETPOS((len + lengthIntoMagicArray + i), ti)] = cur_salt->s[i];
			len += lengthIntoMagicArray + cur_salt->l;

			saved_key[len>>6][GETPOS(len, ti)] = 0x80;
			crypt_len[index] = len;

			// Clean the rest of this buffer as needed
			i = len;
			while (++i & 3)
				saved_key[i>>6][GETPOS(i, ti)] = 0;

			for (; i < clean_pos[ti]; i += 4)
				*(ARCH_WORD_32*)&saved_key[i>>6][GETWORDPOS(i, ti)] = 0;

			clean_pos[ti] = len + 1;

			if (len > longest)
				longest = len;

			((unsigned int*)saved_key[(len+8)>>6])[15*MMX_COEF + (ti&3) + (ti>>2)*SHA_BUF_SIZ*MMX_COEF] = len << 3;
		}

		SSESHA1body(&saved_key[0][t*SHA_BUF_SIZ*4*NBKEYS], (unsigned int*)&interm_crypt[t*20*NBKEYS], NULL, SSEi_MIXED_IN);

		// Typically, no or very few crypts are done at this point so this is faster than to memcpy the lot
		for (index = 0; index < NBKEYS; index++)
			if (crypt_len[index] < 56)
				crypt_done((unsigned int*)interm_crypt, (unsigned int*)crypt_key, ti);

		// Do another and possibly a third, fourth and fifth limb
		for (i = 1; i < (((longest + 8) >> 6) + 1); i++) {
			SSESHA1body(&saved_key[i][t*SHA_BUF_SIZ*4*NBKEYS], (unsigned int*)&interm_crypt[t*20*NBKEYS], (unsigned int*)&interm_crypt[t*20*NBKEYS], SSEi_MIXED_IN|SSEi_RELOAD);
			// Copy any output that is done now
			for (index = 0; index < NBKEYS; index++)
				if (((crypt_len[index] + 8) >> 6) == i)
					crypt_done((unsigned int*)interm_crypt, (unsigned int*)crypt_key, ti);
		}
	}
#undef t
#undef ti
#else

#ifdef _OPENMP
	int index;
#pragma omp parallel for
	for (index = 0; index < count; index++)
#else
#define index 0
#endif
	{
		unsigned int offsetMagicArray, lengthIntoMagicArray;
		unsigned char temp_key[BINARY_SIZE];
		unsigned char tempVar[UTF8_PLAINTEXT_LENGTH + MAGIC_ARRAY_SIZE + SALT_LENGTH]; //max size...
		SHA_CTX ctx;

		if (keyLen[index] < 0) {
			keyLen[index] = strlen((char*)saved_key[index]);

			// Back-out of trailing spaces
			while (saved_key[index][keyLen[index] - 1] == ' ') {
				saved_key[index][--keyLen[index]] = 0;
				if (keyLen[index] == 0) break;
			}
		}

		//1. we need to SHA1 the password and username
		memcpy(tempVar, saved_key[index], keyLen[index]);  //first: the password
		memcpy(tempVar + keyLen[index], cur_salt->s, cur_salt->l); //second: the salt(username)

		SHA1_Init(&ctx);
		SHA1_Update(&ctx, tempVar, keyLen[index] + cur_salt->l);
		SHA1_Final((unsigned char*)temp_key, &ctx);

		lengthIntoMagicArray = extractLengthOfMagicArray(temp_key);
		offsetMagicArray = extractOffsetToMagicArray(temp_key);

		//2. now, hash again --> sha1($password+$partOfMagicArray+$username) --> this is CODVNG passcode...
		memcpy(tempVar + keyLen[index], &theMagicArray[offsetMagicArray], lengthIntoMagicArray);
		memcpy(tempVar + keyLen[index] + lengthIntoMagicArray, cur_salt->s, cur_salt->l);

		SHA1_Init(&ctx);
		SHA1_Update(&ctx, tempVar, keyLen[index] + lengthIntoMagicArray + cur_salt->l);
		SHA1_Final((unsigned char*)crypt_key[index], &ctx);
	}
#undef index
#endif
	return count;
}

// Decodes the 40 hex digits after the final '$' into a 20-byte binary
// digest (byte-swapped for the SIMD layout when applicable).
static void *binary(char *ciphertext)
{
	static int outbuf[BINARY_SIZE / sizeof(int)];
	char *realcipher = (char*)outbuf;
	int i;
	char* newCiphertextPointer;

	newCiphertextPointer = strrchr(ciphertext, '$') + 1;

	for(i=0;i<BINARY_SIZE;i++)
	{
		realcipher[i] = atoi16[ARCH_INDEX(newCiphertextPointer[i*2])]*16 + atoi16[ARCH_INDEX(newCiphertextPointer[i*2+1])];
	}
#ifdef MMX_COEF
	alter_endianity((unsigned char*)realcipher, BINARY_SIZE);
#endif
	return (void*)realcipher;
}

#if 0 // Not possible with current interface
static char *source(struct db_password *pw, char Buf[LINE_BUFFER_SIZE] )
{
	struct saltstruct *salt_s = (struct saltstruct*)(pw->source);
	unsigned char realcipher[BINARY_SIZE];
	unsigned char *cpi;
	char *cpo;
	int i;

	memcpy(realcipher, pw->binary, BINARY_SIZE);
#ifdef MMX_COEF
	alter_endianity(realcipher, BINARY_SIZE);
#endif

	memcpy(Buf, salt_s->s, salt_s->l);
	cpo = &Buf[salt_s->l];
	*cpo++ = '$';

	cpi = realcipher;

	for (i = 0; i < BINARY_SIZE; ++i) {
		*cpo++ = itoa16u[(*cpi)>>4];
		*cpo++ = itoa16u[*cpi&0xF];
		++cpi;
	}
	*cpo = 0;
	return Buf;
}
#endif

// get_hash_N return the low 4..27 bits of the first digest word, used by
// the cracker's hash tables of increasing size.
#ifdef MMX_COEF
#define KEY_OFF ((index/MMX_COEF)*MMX_COEF*5+(index&(MMX_COEF-1)))
static int get_hash_0(int index) { return ((ARCH_WORD_32*)crypt_key)[KEY_OFF] & 0xf; }
static int get_hash_1(int index) { return ((ARCH_WORD_32*)crypt_key)[KEY_OFF] & 0xff; }
static int get_hash_2(int index) { return ((ARCH_WORD_32*)crypt_key)[KEY_OFF] & 0xfff; }
static int get_hash_3(int index) { return ((ARCH_WORD_32*)crypt_key)[KEY_OFF] & 0xffff; }
static int get_hash_4(int index) { return ((ARCH_WORD_32*)crypt_key)[KEY_OFF] & 0xfffff; }
static int get_hash_5(int index) { return ((ARCH_WORD_32*)crypt_key)[KEY_OFF] & 0xffffff; }
static int get_hash_6(int index) { return ((ARCH_WORD_32*)crypt_key)[KEY_OFF] & 0x7ffffff; }
#else
static int get_hash_0(int index) { return *(ARCH_WORD_32*)crypt_key[index] & 0xf; }
static int get_hash_1(int index) { return *(ARCH_WORD_32*)crypt_key[index] & 0xff; }
static int get_hash_2(int index) { return *(ARCH_WORD_32*)crypt_key[index] & 0xfff; }
static int get_hash_3(int index) { return *(ARCH_WORD_32*)crypt_key[index] & 0xffff; }
static int get_hash_4(int index) { return *(ARCH_WORD_32*)crypt_key[index] & 0xfffff; }
static int get_hash_5(int index) { return *(ARCH_WORD_32*)crypt_key[index] & 0xffffff; }
static int get_hash_6(int index) { return *(ARCH_WORD_32*)crypt_key[index] & 0x7ffffff; }
#endif

// Here, we remove any salt padding and trim it to 36 bytes
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[CIPHERTEXT_LENGTH + 1];
	char *p;
	int i;

	p = strrchr(ciphertext, '$');
	i = (int)(p - ciphertext) - 1;
	while (ciphertext[i] == ' ' || i >= SALT_LENGTH)
		i--;
	i++;

	memset(out, 0, sizeof(out));
	memcpy(out, ciphertext, i);
	strnzcpy(&out[i], p, CIPHERTEXT_LENGTH + 1 - i);

	return out;
}

// Public domain hash function by DJ Bernstein
static int salt_hash(void *salt)
{
	struct saltstruct *s = (struct saltstruct*)salt;
	unsigned int hash = 5381;
	unsigned int i;

	for (i = 0; i < s->l; i++)
		hash = ((hash << 5) + hash) ^ s->s[i];

	return hash & (SALT_HASH_SIZE - 1);
}

// Restores the default Unicode handling changed by init().
static void done(void)
{
	initUnicode(UNICODE_UNICODE);
}

// Format descriptor wiring all the callbacks above into the JtR core.
struct fmt_main fmt_sapG = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#if !defined(MMX_COEF) || defined(SHA1_SSE_PARA)
		FMT_OMP |
#endif
		FMT_CASE | FMT_8_BIT | FMT_UTF8,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		set_salt,
		set_key,
		get_key,
		clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
CCHMetric.h
#pragma once #include <algorithm> #include <cassert> #include <cstdint> #include <utility> #include <vector> #include "Algorithms/CCH/CCH.h" #include "Algorithms/CH/CH.h" #include "DataStructures/Containers/ConcurrentLocalIdMap.h" #include "DataStructures/Graph/Attributes/TraversalCostAttribute.h" #include "DataStructures/Graph/Attributes/UnpackingInfoAttribute.h" #include "DataStructures/Graph/Graph.h" #include "Tools/Simd/AlignedVector.h" #include "Tools/ConcurrentHelpers.h" #include "Tools/Constants.h" #include "Tools/Workarounds.h" // This class encodes the actual cost of the edges in a customizable contraction hierarchy. It // stores the edge weights and contains several sequential and parallel customization algorithms. class CCHMetric { public: // Constructs an individual metric incorporating the specified input weights in the specified CCH. CCHMetric(const CCH& cch, const int32_t* const inputWeights) : cch(cch), inputWeights(inputWeights) { assert(inputWeights != nullptr); upWeights.resize(cch.getUpwardGraph().numEdges()); downWeights.resize(cch.getUpwardGraph().numEdges()); } // Incorporates the current input weights in this metric. void customize() { computeRespectingMetric(); computeCustomizedMetric(); } // Runs the perfect customization algorithm. void runPerfectCustomization() noexcept { runPerfectCustomization([](const int /*e*/) {}, [](const int /*e*/) {}); } // Returns a weighted CH having the smallest possible number of edges for the given order. 
CH buildMinimumWeightedCH() { const auto& cchGraph = cch.getUpwardGraph(); std::vector<int8_t> keepUpEdge; std::vector<int8_t> keepDownEdge; #pragma omp parallel sections { #pragma omp section keepUpEdge.resize(cchGraph.numEdges() + 1, true); #pragma omp section keepDownEdge.resize(cchGraph.numEdges() + 1, true); } keepUpEdge.back() = false; keepDownEdge.back() = false; customize(); runPerfectCustomization( [&](const int e) { keepUpEdge[e] = false; }, [&](const int e) { keepDownEdge[e] = false; }); ConcurrentLocalIdMap<4> upEdgeIdMap(keepUpEdge); ConcurrentLocalIdMap<4> downEdgeIdMap(keepDownEdge); const auto numUpEdges = upEdgeIdMap.numLocalIds(); const auto numDownEdges = downEdgeIdMap.numLocalIds(); AlignedVector<CH::SearchGraph::OutEdgeRange> upOutEdges; AlignedVector<CH::SearchGraph::OutEdgeRange> downOutEdges; AlignedVector<int32_t> upEdgeHeads; AlignedVector<int32_t> downEdgeHeads; AlignedVector<TraversalCostAttribute::Type> upEdgeWeights; AlignedVector<TraversalCostAttribute::Type> downEdgeWeights; AlignedVector<UnpackingInfoAttribute::Type> upUnpackingInfo; AlignedVector<UnpackingInfoAttribute::Type> downUnpackingInfo; #pragma omp parallel sections { #pragma omp section upOutEdges.resize(cchGraph.numVertices() + 1); #pragma omp section downOutEdges.resize(cchGraph.numVertices() + 1); #pragma omp section upEdgeHeads.resize(numUpEdges); #pragma omp section downEdgeHeads.resize(numDownEdges); #pragma omp section upEdgeWeights.resize(numUpEdges); #pragma omp section downEdgeWeights.resize(numDownEdges); #pragma omp section upUnpackingInfo.resize(numUpEdges); #pragma omp section downUnpackingInfo.resize(numDownEdges); } #pragma omp parallel for schedule(dynamic, 2048) FORALL_VERTICES(cchGraph, v) { upOutEdges[v].first() = upEdgeIdMap.numMappedGlobalIdsBefore(cchGraph.firstEdge(v)); downOutEdges[v].first() = downEdgeIdMap.numMappedGlobalIdsBefore(cchGraph.firstEdge(v)); } #pragma omp parallel for schedule(dynamic, 2048) FORALL_EDGES(cchGraph, e) { const auto 
tail = cchGraph.edgeTail(e); const auto head = cchGraph.edgeHead(e); if (keepUpEdge[e]) { const auto newIdx = upEdgeIdMap.toLocalId(e); upEdgeHeads[newIdx] = head; upEdgeWeights[newIdx] = upWeights[e]; const auto isShortcut = cch.forEachUpwardInputEdge(e, [&](const int inputEdge) { if (inputWeights[inputEdge] == upWeights[e]) { upUnpackingInfo[newIdx] = std::make_pair(inputEdge, INVALID_EDGE); return false; } return true; }); if (isShortcut) { const auto noTriangleFound = cch.forEachLowerTriangle( tail, head, e, [&](int, const int lower, const int inter) { if (downWeights[lower] + upWeights[inter] == upWeights[e] && keepDownEdge[lower] && keepUpEdge[inter]) { upUnpackingInfo[newIdx].first = downEdgeIdMap.toLocalId(lower); upUnpackingInfo[newIdx].second = upEdgeIdMap.toLocalId(inter); return false; } return true; }); unused(noTriangleFound); assert(!noTriangleFound); } } if (keepDownEdge[e]) { const auto newIdx = downEdgeIdMap.toLocalId(e); downEdgeHeads[newIdx] = head; downEdgeWeights[newIdx] = downWeights[e]; const auto isShortcut = cch.forEachDownwardInputEdge(e, [&](const int inputEdge) { if (inputWeights[inputEdge] == downWeights[e]) { downUnpackingInfo[newIdx] = std::make_pair(inputEdge, INVALID_EDGE); return false; } return true; }); if (isShortcut) { const auto noTriangleFound = cch.forEachLowerTriangle( tail, head, e, [&](int, const int lower, const int inter) { if (downWeights[inter] + upWeights[lower] == downWeights[e] && keepDownEdge[inter] && keepUpEdge[lower]) { downUnpackingInfo[newIdx].first = downEdgeIdMap.toLocalId(inter); downUnpackingInfo[newIdx].second = upEdgeIdMap.toLocalId(lower); return false; } return true; }); unused(noTriangleFound); assert(!noTriangleFound); } } } upOutEdges.back().first() = numUpEdges; downOutEdges.back().first() = numDownEdges; CH::SearchGraph upGraph( std::move(upOutEdges), std::move(upEdgeHeads), numUpEdges, std::move(upEdgeWeights), std::move(upUnpackingInfo)); CH::SearchGraph downGraph( std::move(downOutEdges), 
std::move(downEdgeHeads), numDownEdges, std::move(downEdgeWeights), std::move(downUnpackingInfo)); Permutation order; Permutation ranks; #pragma omp parallel sections { #pragma omp section order = cch.getContractionOrder(); #pragma omp section ranks = cch.getRanks(); } return {std::move(upGraph), std::move(downGraph), std::move(order), std::move(ranks)}; } private: // Computes a respecting metric. void computeRespectingMetric() { upWeights.resize(cch.getUpwardGraph().numEdges()); downWeights.resize(cch.getUpwardGraph().numEdges()); #pragma omp parallel for schedule(static) FORALL_EDGES(cch.getUpwardGraph(), e) { upWeights[e] = INFTY; downWeights[e] = INFTY; cch.forEachUpwardInputEdge(e, [&](const int inputEdge) { if (inputWeights[inputEdge] < upWeights[e]) upWeights[e] = inputWeights[inputEdge]; return true; }); cch.forEachDownwardInputEdge(e, [&](const int inputEdge) { if (inputWeights[inputEdge] < downWeights[e]) downWeights[e] = inputWeights[inputEdge]; return true; }); } } // Computes a customized metric given a respecting one. void computeCustomizedMetric() noexcept { #pragma omp parallel #pragma omp single nowait if (omp_get_num_threads() == 1) computeCustomizedMetricSequentially(); else computeCustomizedMetricInParallel(); } // Computes a customized metric sequentially. void computeCustomizedMetricSequentially() noexcept { cch.forEachVertexBottomUp([&](const int u) { FORALL_INCIDENT_EDGES(cch.getUpwardGraph(), u, lower) { const int v = cch.getUpwardGraph().edgeHead(lower); cch.forEachUpperTriangle(u, v, lower, [&](int, const int inter, const int upper) { if (downWeights[lower] + upWeights[inter] < upWeights[upper]) upWeights[upper] = downWeights[lower] + upWeights[inter]; if (downWeights[inter] + upWeights[lower] < downWeights[upper]) downWeights[upper] = downWeights[inter] + upWeights[lower]; return true; }); } }); } // Computes a customized metric in parallel. 
void computeCustomizedMetricInParallel() noexcept {
  // Same triangle enumeration as the sequential variant, but the edge-weight
  // relaxations use atomic fetch-min so that concurrent relaxations of the
  // same upper edge (reached from different lower triangles) are safe.
  // NOTE(review): presumably invoked from inside the OpenMP parallel region
  // opened in computeCustomizedMetric() — confirm against the caller.
  cch.forEachVertexBottomUp([&](const int u) {
    FORALL_INCIDENT_EDGES(cch.getUpwardGraph(), u, lower) {
      const int v = cch.getUpwardGraph().edgeHead(lower);
      cch.forEachUpperTriangle(u, v, lower, [&](int, const int inter, const int upper) {
        // Relax both directions of the upper edge via the two-edge path
        // through this triangle.
        atomicFetchMin(upWeights[upper], downWeights[lower] + upWeights[inter]);
        atomicFetchMin(downWeights[upper], downWeights[inter] + upWeights[lower]);
        return true;  // continue with the remaining upper triangles
      });
    }
  });
}

// Runs the perfect customization algorithm.
// Vertices are processed top-down. For each triangle {lower, inter, upper},
// each of the four two-edge path combinations is tested against the
// corresponding direct edge; whenever a path is strictly shorter, the direct
// edge's weight is lowered to the path length and the edge is reported via
// the mark*EdgeForRemoval callback (it is then dominated by the path and can
// be dropped from the search graphs).
template <typename T1, typename T2>
void runPerfectCustomization(T1 markUpEdgeForRemoval, T2 markDownEdgeForRemoval) noexcept {
  #pragma omp parallel
  #pragma omp single nowait
  cch.forEachVertexTopDown([&](const int u) {
    FORALL_INCIDENT_EDGES(cch.getUpwardGraph(), u, lower) {
      const int v = cch.getUpwardGraph().edgeHead(lower);
      cch.forEachUpperTriangle(u, v, lower, [&](int, const int inter, const int upper) {
        if (upWeights[inter] + downWeights[upper] < upWeights[lower]) {
          upWeights[lower] = upWeights[inter] + downWeights[upper];
          markUpEdgeForRemoval(lower);
        }
        if (upWeights[lower] + upWeights[upper] < upWeights[inter]) {
          upWeights[inter] = upWeights[lower] + upWeights[upper];
          markUpEdgeForRemoval(inter);
        }
        if (upWeights[upper] + downWeights[inter] < downWeights[lower]) {
          downWeights[lower] = upWeights[upper] + downWeights[inter];
          markDownEdgeForRemoval(lower);
        }
        if (downWeights[upper] + downWeights[lower] < downWeights[inter]) {
          downWeights[inter] = downWeights[upper] + downWeights[lower];
          markDownEdgeForRemoval(inter);
        }
        return true;  // continue with the remaining upper triangles
      });
    }
  });
}

const CCH& cch;                      // The associated CCH.
const int32_t* const inputWeights;   // The weights of the input edges.
std::vector<int32_t> upWeights;      // The upward weights of the edges in the CCH.
std::vector<int32_t> downWeights;    // The downward weights of the edges in the CCH.
};
GB_Matrix_wait.c
//------------------------------------------------------------------------------
// GB_Matrix_wait: finish all pending computations on a single matrix
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// CALLS: GB_builder

// This function is typically called via the GB_MATRIX_WAIT(A) macro, except
// for GB_assign, GB_subassign, and GB_mxm.

// The matrix A has zombies and/or pending tuples placed there by
// GrB_setElement, GrB_*assign, or GB_mxm. Zombies must now be deleted, and
// pending tuples must now be assembled together and added into the matrix.
// The indices in A might also be jumbled; if so, they are sorted now.

// When the function returns, all pending tuples and zombies have been
// deleted. This is true even if the function fails due to lack of memory (in
// that case, the matrix is cleared as well).

// If A is hypersparse, the time taken is at most O(nnz(A) + t log t), where t
// is the number of pending tuples in A, and nnz(A) includes both zombies and
// live entries. There is no O(m) or O(n) time component, if A is m-by-n.
// If the number of non-empty vectors of A grows too large, then A can be
// converted to non-hypersparse.

// If A is non-hypersparse, then O(n) is added in the worst case, to prune
// zombies and to update the vector pointers for A.

// If the method is successful, it does an OpenMP flush just before returning.
#include "GB_select.h"
#include "GB_add.h"
#include "GB_Pending.h"
#include "GB_build.h"
#include "GB_jappend.h"

// On any error path: free all workspace matrices and clear A itself, so the
// caller never sees a matrix with partially-applied pending work.
#define GB_FREE_ALL                 \
{                                   \
    GB_phbix_free (A) ;             \
    GB_Matrix_free (&T) ;           \
    GB_Matrix_free (&S) ;           \
    GB_Matrix_free (&A1) ;          \
}

GB_PUBLIC                           // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_Matrix_wait             // finish all pending computations
(
    GrB_Matrix A,                   // matrix with pending computations
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    // T: pending tuples assembled into a matrix; S: result of A+T;
    // A1: shallow slice of the tail of A.  All freed by GB_FREE_ALL on error.
    GrB_Matrix T = NULL, S = NULL, A1 = NULL ;
    GrB_Info info = GrB_SUCCESS ;
    ASSERT_MATRIX_OK (A, "A to wait", GB_FLIP (GB0)) ;

    if (GB_IS_FULL (A) || GB_IS_BITMAP (A))
    {
        // full and bitmap matrices never have any pending work
        ASSERT (!GB_ZOMBIES (A)) ;
        ASSERT (!GB_JUMBLED (A)) ;
        ASSERT (!GB_PENDING (A)) ;
        // ensure the matrix is written to memory
        #pragma omp flush
        return (GrB_SUCCESS) ;
    }

    // only sparse and hypersparse matrices can have pending work
    ASSERT (GB_IS_SPARSE (A) || GB_IS_HYPERSPARSE (A)) ;
    ASSERT (GB_ZOMBIES_OK (A)) ;
    ASSERT (GB_JUMBLED_OK (A)) ;
    ASSERT (GB_PENDING_OK (A)) ;

    //--------------------------------------------------------------------------
    // get the zombie and pending count, and burble if work needs to be done
    //--------------------------------------------------------------------------

    int64_t nzombies = A->nzombies ;
    int64_t npending = GB_Pending_n (A) ;
    if (nzombies > 0 || npending > 0 || A->jumbled)
    {
        GB_BURBLE_MATRIX (A, "(wait: " GBd " %s, " GBd " pending%s) ",
            nzombies, (nzombies == 1) ? "zombie" : "zombies", npending,
            A->jumbled ? ", jumbled" : "") ;
    }

    //--------------------------------------------------------------------------
    // determine the max # of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;

    //--------------------------------------------------------------------------
    // assemble the pending tuples into T
    //--------------------------------------------------------------------------

    int64_t tnz = 0 ;
    if (npending > 0)
    {

        //----------------------------------------------------------------------
        // construct a new hypersparse matrix T with just the pending tuples
        //----------------------------------------------------------------------

        // T has the same type as A->type, which can differ from the type of
        // the pending tuples, A->Pending->type.  The Pending->op can be NULL
        // (an implicit SECOND function), or it can be any accum operator.  The
        // z=accum(x,y) operator can have any types, and it does not have to be
        // associative.

        info = GB_builder
        (
            &T,                     // create T
            A->type,                // T->type = A->type
            A->vlen,                // T->vlen = A->vlen
            A->vdim,                // T->vdim = A->vdim
            A->is_csc,              // T->is_csc = A->is_csc
            &(A->Pending->i),       // iwork_handle, becomes T->i on output
            &(A->Pending->j),       // jwork_handle, free on output
            &(A->Pending->x),       // Swork_handle, free on output
            A->Pending->sorted,     // tuples may or may not be sorted
            false,                  // there might be duplicates; look for them
            A->Pending->nmax,       // size of Pending->[ijx] arrays
            true,                   // is_matrix: unused
            NULL, NULL, NULL,       // original I,J,S tuples, not used here
            npending,               // # of tuples
            A->Pending->op,         // dup operator for assembling duplicates
            A->Pending->type->code, // type of Pending->x
            Context
        ) ;

        //----------------------------------------------------------------------
        // free pending tuples
        //----------------------------------------------------------------------

        // The tuples have been converted to T, which is more compact, and
        // duplicates have been removed.  The following work needs to be done
        // even if the builder fails.

        // GB_builder frees A->Pending->j and A->Pending->x.  If successful,
        // A->Pending->i is now T->i.  Otherwise A->Pending->i is freed.  In
        // both cases, A->Pending->i is NULL.
        ASSERT (A->Pending->i == NULL) ;
        ASSERT (A->Pending->j == NULL) ;
        ASSERT (A->Pending->x == NULL) ;

        // free the list of pending tuples
        GB_Pending_free (&(A->Pending)) ;
        ASSERT (!GB_PENDING (A)) ;

        ASSERT_MATRIX_OK (A, "A after moving pending tuples to T", GB0) ;

        //----------------------------------------------------------------------
        // check the status of the builder
        //----------------------------------------------------------------------

        // Finally check the status of the builder.  The pending tuples must
        // be freed (just above), whether or not the builder is successful.
        if (info != GrB_SUCCESS)
        {
            // out of memory in GB_builder
            GB_FREE_ALL ;
            return (info) ;
        }

        ASSERT_MATRIX_OK (T, "T = hypersparse matrix of pending tuples", GB0) ;
        ASSERT (GB_IS_HYPERSPARSE (T)) ;
        ASSERT (!GB_ZOMBIES (T)) ;
        ASSERT (!GB_JUMBLED (T)) ;
        ASSERT (!GB_PENDING (T)) ;

        tnz = GB_NNZ (T) ;
        ASSERT (tnz > 0) ;
    }

    //--------------------------------------------------------------------------
    // delete zombies
    //--------------------------------------------------------------------------

    // A zombie is an entry A(i,j) in the matrix that has been marked for
    // deletion, but hasn't been deleted yet.  It is marked by "negating" its
    // index i, replacing it with GB_FLIP(i).

    // TODO: pass tnz to GB_selector, to pad the reallocated A matrix

    if (nzombies > 0)
    {
        // remove all zombies from A
        #ifdef GB_DEBUG
        int64_t anz_orig = GB_NNZ (A) ;
        #endif
        GB_OK (GB_selector (NULL /* A in-place */, GB_NONZOMBIE_opcode, NULL,
            false, A, 0, NULL, Context)) ;
        ASSERT (A->nzombies == (anz_orig - GB_NNZ (A))) ;
        A->nzombies = 0 ;
    }

    ASSERT_MATRIX_OK (A, "A after zombies removed", GB0) ;

    // all the zombies are gone, and pending tuples are now in T
    ASSERT (!GB_ZOMBIES (A)) ;
    ASSERT (GB_JUMBLED_OK (A)) ;
    ASSERT (!GB_PENDING (A)) ;

    //--------------------------------------------------------------------------
    // unjumble the matrix
    //--------------------------------------------------------------------------

    GB_OK (GB_unjumble (A, Context)) ;

    ASSERT (!GB_ZOMBIES (A)) ;
    ASSERT (!GB_JUMBLED (A)) ;
    ASSERT (!GB_PENDING (A)) ;

    //--------------------------------------------------------------------------
    // check for pending tuples
    //--------------------------------------------------------------------------

    if (npending == 0)
    {
        // conform A to its desired sparsity structure and return result
        info = GB_conform (A, Context) ;
        #pragma omp flush
        return (info) ;
    }

    //--------------------------------------------------------------------------
    // check for quick transplant
    //--------------------------------------------------------------------------

    int64_t anz = GB_NNZ (A) ;
    if (anz == 0)
    {
        // A has no entries so just transplant T into A, then free T and
        // conform A to its desired hypersparsity
        info = GB_transplant_conform (A, A->type, &T, Context) ;
        #pragma omp flush
        return (info) ;
    }

    //--------------------------------------------------------------------------
    // determine the method for A = A+T
    //--------------------------------------------------------------------------

    // If anz > 0, T is hypersparse, even if A is a GrB_Vector
    ASSERT (GB_IS_HYPERSPARSE (T)) ;
    ASSERT (tnz > 0) ;
    ASSERT (T->nvec > 0) ;
    ASSERT (A->nvec > 0) ;

    // tjfirst = first vector in T
    int64_t tjfirst = T->h [0] ;
    int64_t anz0 = 0 ;
    int64_t kA = 0 ;
    int64_t jlast ;

    int64_t *GB_RESTRICT Ap = A->p ;
    int64_t *GB_RESTRICT Ah = A->h ;
    int64_t *GB_RESTRICT Ai = A->i ;
    GB_void *GB_RESTRICT Ax = (GB_void *) A->x ;
    int64_t anvec = A->nvec ;
    int64_t asize = A->type->size ;

    // anz0 = nnz (A0) = nnz (A (:, 0:tjfirst-1)), the region not modified by T
    if (A->h != NULL)
    {
        // find tjfirst in A->h
        int64_t pright = anvec - 1 ;
        bool found ;
        GB_SPLIT_BINARY_SEARCH (tjfirst, A->h, kA, pright, found) ;
        // A->h [0 ... kA-1] excludes vector tjfirst.  The list
        // A->h [kA ... anvec-1] includes tjfirst.
        ASSERT (kA >= 0 && kA <= anvec) ;
        ASSERT (GB_IMPLIES (kA > 0 && kA < anvec, A->h [kA-1] < tjfirst)) ;
        ASSERT (GB_IMPLIES (found, A->h [kA] == tjfirst)) ;
        jlast = (kA > 0) ? A->h [kA-1] : (-1) ;
    }
    else
    {
        // non-hypersparse: vector k is simply column k
        kA = tjfirst ;
        jlast = tjfirst - 1 ;
    }

    // anz1 = nnz (A1) = nnz (A (:, kA:end)), the region modified by T
    anz0 = A->p [kA] ;
    int64_t anz1 = anz - anz0 ;
    bool ignore ;

    // A + T will have anz_new entries
    int64_t anz_new = anz + tnz ;       // must have at least this space

    if (2 * anz1 < anz0)
    {

        //----------------------------------------------------------------------
        // append new tuples to A
        //----------------------------------------------------------------------

        // A is growing incrementally.  It splits into two parts: A = [A0 A1].
        // where A0 = A (:, 0:kA-1) and A1 = A (:, kA:end).  The
        // first part (A0 with anz0 = nnz (A0) entries) is not modified.  The
        // second part (A1, with anz1 = nnz (A1) entries) overlaps with T.
        // If anz1 is zero, or small compared to anz0, then it is faster to
        // leave A0 unmodified, and to update just A1.

        // TODO: if A also had zombies, GB_selector could pad A so that
        // A->nzmax = anz + tnz.

        // make sure A has enough space for the new tuples
        if (anz_new > A->nzmax)
        {
            // double the size if not enough space
            GB_OK (GB_ix_resize (A, anz_new, Context)) ;
            // A->i and A->x may have moved; refresh the cached pointers
            Ai = A->i ;
            Ax = (GB_void *) A->x ;
        }

        //----------------------------------------------------------------------
        // T = A1 + T
        //----------------------------------------------------------------------

        if (anz1 > 0)
        {

            //------------------------------------------------------------------
            // extract A1 = A (:, kA:end) as a shallow copy
            //------------------------------------------------------------------

            // A1 = [0, A (:, kA:end)], hypersparse with same dimensions as A
            GB_OK (GB_new (&A1, // hyper, new header
                A->type, A->vlen, A->vdim, GB_Ap_malloc, A->is_csc,
                GxB_HYPERSPARSE, GB_ALWAYS_HYPER, anvec - kA, Context)) ;

            // the A1->i and A1->x content are shallow copies of A(:,kA:end)
            A1->x = (void *) (Ax + asize * anz0) ;
            A1->i = Ai + anz0 ;
            A1->x_shallow = true ;
            A1->i_shallow = true ;
            A1->nzmax = anz1 ;

            // fill the column A1->h and A1->p with A->h and A->p, shifted
            int64_t *GB_RESTRICT A1p = A1->p ;
            int64_t *GB_RESTRICT A1h = A1->h ;
            int64_t a1nvec = 0 ;
            for (int64_t k = kA ; k < anvec ; k++)
            {
                // get A (:,k)
                int64_t pA_start = Ap [k] ;
                int64_t pA_end = Ap [k+1] ;
                if (pA_end > pA_start)
                {
                    // add this column to A1 if A (:,k) is not empty
                    int64_t j = GBH (Ah, k) ;
                    A1p [a1nvec] = pA_start - anz0 ;
                    A1h [a1nvec] = j ;
                    a1nvec++ ;
                }
            }

            // finalize A1
            A1p [a1nvec] = anz1 ;
            A1->nvec = a1nvec ;
            A1->nvec_nonempty = a1nvec ;
            A1->magic = GB_MAGIC ;

            ASSERT_MATRIX_OK (A1, "A1 slice for GB_Matrix_wait", GB0) ;

            //------------------------------------------------------------------
            // S = A1 + T, with no operator or mask
            //------------------------------------------------------------------

            GB_OK (GB_add (&S, A->type, A->is_csc, NULL, 0, 0, &ignore, A1, T,
                NULL, Context)) ;

            ASSERT_MATRIX_OK (S, "S = A1+T", GB0) ;

            // free A1 and T
            GB_Matrix_free (&T) ;
            GB_Matrix_free (&A1) ;

            //------------------------------------------------------------------
            // replace T with S
            //------------------------------------------------------------------

            T = S ;
            S = NULL ;
            tnz = GB_NNZ (T) ;

            //------------------------------------------------------------------
            // remove A1 from the vectors of A, if A is hypersparse
            //------------------------------------------------------------------

            if (A->h != NULL)
            {
                A->nvec = kA ;
            }
        }

        //----------------------------------------------------------------------
        // append T to the end of A0
        //----------------------------------------------------------------------

        const int64_t *GB_RESTRICT Tp = T->p ;
        const int64_t *GB_RESTRICT Th = T->h ;
        const int64_t *GB_RESTRICT Ti = T->i ;
        const GB_void *GB_RESTRICT Tx = (GB_void *) T->x ;
        int64_t tnvec = T->nvec ;

        anz = anz0 ;
        int64_t anz_last = anz ;

        int nthreads = GB_nthreads (tnz, chunk, nthreads_max) ;

        // append the indices and values of T to the end of A
        GB_memcpy (Ai + anz , Ti, tnz * sizeof (int64_t), nthreads) ;
        GB_memcpy (Ax + anz * asize, Tx, tnz * asize , nthreads) ;

        // append the vectors of T to the end of A
        for (int64_t k = 0 ; k < tnvec ; k++)
        {
            int64_t j = Th [k] ;
            ASSERT (j >= tjfirst) ;
            anz += (Tp [k+1] - Tp [k]) ;
            GB_OK (GB_jappend (A, j, &jlast, anz, &anz_last, Context)) ;
        }

        GB_jwrapup (A, jlast, anz) ;
        ASSERT (anz == anz_new) ;

        // need to recompute the # of non-empty vectors in GB_conform
        A->nvec_nonempty = -1 ;     // recomputed just below

        ASSERT_MATRIX_OK (A, "A after GB_Matrix_wait:append", GB0) ;

        GB_Matrix_free (&T) ;

        // conform A to its desired sparsity structure
        info = GB_conform (A, Context) ;
    }
    else
    {

        //----------------------------------------------------------------------
        // A = A+T
        //----------------------------------------------------------------------

        // The update is not incremental since most of A is changing.  Just do
        // a single parallel add: S=A+T, free T, and then transplant S back
        // into A.  The nzmax of A is tight, with no room for future
        // incremental growth.

        // FUTURE:: if GB_add could tolerate zombies in A, then the initial
        // prune of zombies can be skipped.

        GB_OK (GB_add (&S, A->type, A->is_csc, NULL, 0, 0, &ignore, A, T,
            NULL, Context)) ;
        GB_Matrix_free (&T) ;
        ASSERT_MATRIX_OK (S, "S after GB_Matrix_wait:add", GB0) ;
        info = GB_transplant_conform (A, A->type, &S, Context) ;
    }

    //--------------------------------------------------------------------------
    // flush the matrix and return result
    //--------------------------------------------------------------------------

    #pragma omp flush
    return (info) ;
}
LagrangeInterpolation.c
#include<stdio.h>

/* Maximum number of table entries the fixed-size arrays can hold. */
#define MAX_TERMS 100

/*
 * Evaluates the Lagrange interpolating polynomial defined by the n points
 * (x[i], y[i]) at the point a, and returns the interpolated value.
 * Each term of the sum is independent, so the terms are accumulated in
 * parallel with an OpenMP reduction.
 */
static float lagrange_interpolate(const float x[], const float y[], int n, float a)
{
    float k = 0;
    #pragma omp parallel for reduction(+:k)
    for (int i = 0; i < n; i++)
    {
        float s = 1, t = 1;   /* numerator and denominator of the i-th basis polynomial */
        for (int j = 0; j < n; j++)
        {
            if (j != i)
            {
                s = s*(a - x[j]);
                t = t*(x[i] - x[j]);
            }
        }
        k += (s/t)*y[i];
    }
    return k;
}

int main()
{
    float x[MAX_TERMS], y[MAX_TERMS], a;
    int n, i, d = 1;

    printf("\n\n Enter the number of the terms of the table: ");
    /* Reject bad input and out-of-range n: without this check, n > 100
       overflowed the fixed-size arrays (stack buffer overflow). */
    if (scanf("%d", &n) != 1 || n < 1 || n > MAX_TERMS)
    {
        printf("\n\n Invalid number of terms (must be between 1 and %d).\n", MAX_TERMS);
        return 1;
    }

    printf("\n\n Enter the respective values of the variables x and y: \n");
    for (i = 0; i < n; i++)
    {
        if (scanf("%f", &x[i]) != 1 || scanf("%f", &y[i]) != 1)
        {
            printf("\n\n Invalid table entry.\n");
            return 1;
        }
    }

    printf("\n\n The table you entered is as follows :\n\n");
    for (i = 0; i < n; i++)
    {
        printf("%0.3f\t%0.3f", x[i], y[i]);
        printf("\n");
    }

    /* The original code printed a "continue?" prompt and read the answer but
       the loop itself was commented out, so the answer was ignored.  The loop
       is restored here to match the prompt's promise. */
    while (d == 1)
    {
        printf(" \n\n\n Enter the value of the x to find the respective value of y\n\n\n");
        if (scanf("%f", &a) != 1)
            break;

        printf("\n\n The respective value of the variable y is: %f",
               lagrange_interpolate(x, y, n, a));

        printf("\n\n Do you want to continue?\n\n Press 1 to continue and any other key to exit");
        if (scanf("%d", &d) != 1)
            break;
    }
    return 0;
}
effect.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE FFFFF FFFFF EEEEE CCCC TTTTT % % E F F E C T % % EEE FFF FFF EEE C T % % E F F E C T % % EEEEE F F EEEEE CCCC T % % % % % % MagickCore Image Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/morphology.h" #include "MagickCore/morphology-private.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/threshold.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveBlurImage() 
adaptively blurs the image by blurring less % intensely near image edges and more intensely far from edges. We blur the % image with a Gaussian operator of the given radius and standard deviation % (sigma). For reasonable results, radius should be larger than sigma. Use a % radius of 0 and AdaptiveBlurImage() selects a suitable radius for you. % % The format of the AdaptiveBlurImage method is: % % Image *AdaptiveBlurImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Laplacian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { #define AdaptiveBlurImageTag "Convolve/Image" #define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma) CacheView *blur_view, *edge_view, *image_view; double normalize, **kernel; Image *blur_image, *edge_image, *gaussian_image; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; size_t width; ssize_t j, k, u, v, y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (fabs(sigma) < MagickEpsilon) return(blur_image); if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { blur_image=DestroyImage(blur_image); return((Image *) NULL); } /* Edge detect the image brightness channel, level, blur, and level again. 
*/ edge_image=EdgeImage(image,radius,exception); if (edge_image == (Image *) NULL) { blur_image=DestroyImage(blur_image); return((Image *) NULL); } (void) AutoLevelImage(edge_image,exception); gaussian_image=BlurImage(edge_image,radius,sigma,exception); if (gaussian_image != (Image *) NULL) { edge_image=DestroyImage(edge_image); edge_image=gaussian_image; } (void) AutoLevelImage(edge_image,exception); /* Create a set of kernels from maximum (radius,sigma) to minimum. */ width=GetOptimalKernelWidth2D(radius,sigma); kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width, sizeof(*kernel))); if (kernel == (double **) NULL) { edge_image=DestroyImage(edge_image); blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) ResetMagickMemory(kernel,0,(size_t) width*sizeof(*kernel)); for (i=0; i < (ssize_t) width; i+=2) { kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory( (size_t) (width-i),(width-i)*sizeof(**kernel))); if (kernel[i] == (double *) NULL) break; normalize=0.0; j=(ssize_t) (width-i-1)/2; k=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) { kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); normalize+=kernel[i][k]; k++; } } kernel[i][(k-1)/2]+=(double) (1.0-normalize); if (sigma < MagickEpsilon) kernel[i][(k-1)/2]=1.0; } if (i < (ssize_t) width) { for (i-=2; i >= 0; i-=2) kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]); kernel=(double **) RelinquishAlignedMemory(kernel); edge_image=DestroyImage(edge_image); blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Adaptively blur image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); edge_view=AcquireVirtualCacheView(edge_image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,blur_image,blur_image->rows,1) #endif for (y=0; y < (ssize_t) blur_image->rows; y++) { register const Quantum *magick_restrict r; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) blur_image->columns; x++) { register const Quantum *magick_restrict p; register ssize_t i; ssize_t center, j; j=(ssize_t) ceil((double) width*(1.0-QuantumScale* GetPixelIntensity(edge_image,r))-0.5); if (j < 0) j=0; else if (j > (ssize_t) width) j=(ssize_t) width; if ((j & 0x01) != 0) j--; p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y- (ssize_t) ((width-j)/2L),width-j,width-j,exception); if (p == (const Quantum *) NULL) break; center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+ GetPixelChannels(image)*((width-j)/2); for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const double *magick_restrict k; register const Quantum *magick_restrict pixels; register ssize_t u; ssize_t v; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if (((blur_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p+center) <= (QuantumRange/2))) { 
SetPixelChannel(blur_image,channel,p[center+i],q); continue; } k=kernel[j]; pixels=p; pixel=0.0; gamma=0.0; if ((blur_traits & BlendPixelTrait) == 0) { /* No alpha blending. */ for (v=0; v < (ssize_t) (width-j); v++) { for (u=0; u < (ssize_t) (width-j); u++) { pixel+=(*k)*pixels[i]; gamma+=(*k); k++; pixels+=GetPixelChannels(image); } } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); continue; } /* Alpha blending. */ for (v=0; v < (ssize_t) (width-j); v++) { for (u=0; u < (ssize_t) (width-j); u++) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=(*k)*alpha*pixels[i]; gamma+=(*k)*alpha; k++; pixels+=GetPixelChannels(image); } } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } q+=GetPixelChannels(blur_image); r+=GetPixelChannels(edge_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_AdaptiveBlurImage) #endif proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_image->type=image->type; blur_view=DestroyCacheView(blur_view); edge_view=DestroyCacheView(edge_view); image_view=DestroyCacheView(image_view); edge_image=DestroyImage(edge_image); for (i=0; i < (ssize_t) width; i+=2) kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]); kernel=(double **) RelinquishAlignedMemory(kernel); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e S h a r p e n I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveSharpenImage() adaptively sharpens the image by 
sharpening more
%  intensely near image edges and less intensely far from edges. We sharpen the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma).  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you.
%
%  The format of the AdaptiveSharpenImage method is:
%
%      Image *AdaptiveSharpenImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define AdaptiveSharpenImageTag  "Convolve/Image"
#define MagickSigma  (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *sharp_view,
    *edge_view,
    *image_view;

  double
    normalize,
    **kernel;

  Image
    *sharp_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sharp_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (sharp_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    A zero sigma means no sharpening: return the clone unchanged.
  */
  if (fabs(sigma) < MagickEpsilon)
    return(sharp_image);
  if (SetImageStorageClass(sharp_image,DirectClass,exception) == MagickFalse)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, sharp, and level again.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image,exception);
  /*
    Blur the edge map to smooth the kernel-selection field; on blur failure
    the unblurred edge image is used as-is (best effort).
  */
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image,exception);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.
    Only even indices i are populated; kernel[i] is a (width-i)x(width-i)
    negative-Gaussian (sharpening) kernel with its center weight set so the
    taps sum to -normalize (i.e. a high-pass response).  width is odd, so
    width-i stays odd and each kernel has a well-defined center.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) ResetMagickMemory(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
      (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    /*
      Replace the center tap so the kernel acts as a sharpener; for a
      degenerate sigma fall back to an identity kernel.
    */
    kernel[i][(k-1)/2]=(double) ((-2.0)*normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  if (i < (ssize_t) width)
    {
      /*
        Partial allocation failure: release the kernels built so far.
      */
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively sharpen image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  sharp_view=AcquireAuthenticCacheView(sharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,sharp_image,sharp_image->rows,1)
#endif
  for (y=0; y < (ssize_t) sharp_image->rows; y++)
  {
    register const Quantum
      *magick_restrict r;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1,
      exception);
    if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) sharp_image->columns; x++)
    {
      register const Quantum
        *magick_restrict p;

      register ssize_t
        i;

      ssize_t
        center,
        j;

      /*
        Select a kernel from the edge intensity: a stronger edge response
        yields a smaller j, i.e. a wider kernel (width-j), so sharpening is
        more intense near edges.  j is clamped to [0,width] and forced even
        to land on a populated kernel[] slot.
      */
      j=(ssize_t) ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5);
      if (j < 0)
        j=0;
      else
        if (j > (ssize_t) width)
          j=(ssize_t) width;
      if ((j & 0x01) != 0)
        j--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
        (ssize_t) ((width-j)/2L),width-j,width-j,exception);
      if (p == (const Quantum *) NULL)
        break;
      /*
        center indexes the middle pixel of the (width-j)x(width-j) patch.
      */
      center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
        GetPixelChannels(image)*((width-j)/2);
      for (i=0; i < (ssize_t) GetPixelChannels(sharp_image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          sharp_traits,
          traits;

        register const double
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        sharp_traits=GetPixelChannelTraits(sharp_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (sharp_traits == UndefinedPixelTrait))
          continue;
        if (((sharp_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p+center) <= (QuantumRange/2)))
          {
            /*
              Masked or copy-only channel: pass the source value through.
            */
            SetPixelChannel(sharp_image,channel,p[center+i],q);
            continue;
          }
        k=kernel[j];
        pixels=p;
        pixel=0.0;
        gamma=0.0;
        if ((sharp_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (v=0; v < (ssize_t) (width-j); v++)
            {
              for (u=0; u < (ssize_t) (width-j); u++)
              {
                pixel+=(*k)*pixels[i];
                gamma+=(*k);
                k++;
                pixels+=GetPixelChannels(image);
              }
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each tap by the source pixel's alpha.
        */
        for (v=0; v < (ssize_t) (width-j); v++)
        {
          for (u=0; u < (ssize_t) (width-j); u++)
          {
            alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
            pixel+=(*k)*alpha*pixels[i];
            gamma+=(*k)*alpha;
            k++;
            pixels+=GetPixelChannels(image);
          }
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(sharp_image);
      r+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress++ is guarded by the critical section under OpenMP. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AdaptiveSharpenImage)
#endif
        proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sharp_image->type=image->type;
  sharp_view=DestroyCacheView(sharp_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    sharp_image=DestroyImage(sharp_image);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l u r I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlurImage() blurs an image.
We convolve the image with a Gaussian operator % of the given radius and standard deviation (sigma). For reasonable results, % the radius should be larger than sigma. Use a radius of 0 and BlurImage() % selects a suitable radius for you. % % The format of the BlurImage method is: % % Image *BlurImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *BlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { char geometry[MagickPathExtent]; KernelInfo *kernel_info; Image *blur_image; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateBlurImage(image,radius,sigma,exception); if (blur_image != (Image *) NULL) return(blur_image); #endif (void) FormatLocaleString(geometry,MagickPathExtent, "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma); kernel_info=AcquireKernelInfo(geometry,exception); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); blur_image=ConvolveImage(image,kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n v o l v e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvolveImage() applies a custom convolution kernel 
to the image. % % The format of the ConvolveImage method is: % % Image *ConvolveImage(const Image *image,const KernelInfo *kernel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o kernel: the filtering kernel. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ConvolveImage(const Image *image, const KernelInfo *kernel_info,ExceptionInfo *exception) { Image *convolve_image; #if defined(MAGICKCORE_OPENCL_SUPPORT) convolve_image=AccelerateConvolveImage(image,kernel_info,exception); if (convolve_image != (Image *) NULL) return(convolve_image); #endif convolve_image=MorphologyImage(image,ConvolveMorphology,1,kernel_info, exception); return(convolve_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s p e c k l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DespeckleImage() reduces the speckle noise in an image while perserving the % edges of the original image. A speckle removing filter uses a complementary % hulling technique (raising pixels that are darker than their surrounding % neighbors, then complementarily lowering pixels that are brighter than their % surrounding neighbors) to reduce the speckle index of that image (reference % Crimmins speckle removal). % % The format of the DespeckleImage method is: % % Image *DespeckleImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
%
*/

/*
  Hull() performs one step of the Crimmins complementary hulling filter on a
  single channel plane.  f and g are (columns+2) x (rows+2) buffers padded
  with a one-pixel border (the index math i=(2*y+1)+y*columns and the
  p=f+(columns+2) base offsets assume that layout).  Each pixel is compared
  against its neighbor at (x_offset,y_offset); with positive polarity values
  are raised by one char-scaled quantum step, with non-positive polarity they
  are lowered.  The first pass writes f->g, the second refines g->f using
  both the neighbor (r) and its mirror (s).
*/
static void Hull(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g)
{
  register Quantum
    *p,
    *q,
    *r,
    *s;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(f != (Quantum *) NULL);
  assert(g != (Quantum *) NULL);
  /* Skip the top border row; r points at the offset neighbor. */
  p=f+(columns+2);
  q=g+(columns+2);
  r=p+(y_offset*(columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickRealType
      v;

    register ssize_t
      i,
      x;

    /* i skips the left/right border columns of each padded row. */
    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] >= (v+ScaleCharToQuantum(2)))
          v+=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] <= (v-ScaleCharToQuantum(2)))
          v-=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
  }
  /* Second pass: compare against the neighbor (r) and its mirror (s). */
  p=f+(columns+2);
  q=g+(columns+2);
  r=q+(y_offset*(columns+2)+x_offset);
  s=q-(y_offset*(columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    register ssize_t
      i,
      x;

    MagickRealType
      v;

    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] >= (v+ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] > v))
          v+=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] <= (v-ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] < v))
          v-=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
  }
}

MagickExport Image *DespeckleImage(const Image
*image,ExceptionInfo *exception)
{
#define DespeckleImageTag  "Despeckle/Image"

  CacheView
    *despeckle_view,
    *image_view;

  Image
    *despeckle_image;

  MagickBooleanType
    status;

  MemoryInfo
    *buffer_info,
    *pixel_info;

  Quantum
    *magick_restrict buffer,
    *magick_restrict pixels;

  register ssize_t
    i;

  size_t
    length;

  /* Neighbor offsets for the four hull directions (S, E, SE, SW). */
  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};

  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  despeckle_image=AccelerateDespeckleImage(image,exception);
  if (despeckle_image != (Image *) NULL)
    return(despeckle_image);
#endif
  despeckle_image=CloneImage(image,0,0,MagickTrue,exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(despeckle_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffer.  Both buffers carry a one-pixel border on every
    side, as expected by Hull().
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
  buffer_info=AcquireVirtualMemory(length,sizeof(*buffer));
  if ((pixel_info == (MemoryInfo *) NULL) ||
      (buffer_info == (MemoryInfo *) NULL))
    {
      if (buffer_info != (MemoryInfo *) NULL)
        buffer_info=RelinquishVirtualMemory(buffer_info);
      if (pixel_info != (MemoryInfo *) NULL)
        pixel_info=RelinquishVirtualMemory(pixel_info);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info);
  buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info);
  /*
    Reduce speckle in the image, one channel plane at a time.
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel
      channel;

    PixelTrait
      despeckle_traits,
      traits;

    register ssize_t
      k,
      x;

    ssize_t
      j,
      y;

    if (status == MagickFalse)
      continue;
    channel=GetPixelChannelChannel(image,i);
    traits=GetPixelChannelTraits(image,channel);
    despeckle_traits=GetPixelChannelTraits(despeckle_image,channel);
    if ((traits == UndefinedPixelTrait) ||
        (despeckle_traits == UndefinedPixelTrait))
      continue;
    if ((despeckle_traits & CopyPixelTrait) != 0)
      continue;
    /*
      Copy channel i into the padded buffer; j walks the padded layout,
      skipping the border columns/rows.
    */
    (void) ResetMagickMemory(pixels,0,length*sizeof(*pixels));
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const Quantum
        *magick_restrict p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[j++]=p[i];
        p+=GetPixelChannels(image);
      }
      j++;
    }
    (void) ResetMagickMemory(buffer,0,length*sizeof(*buffer));
    /*
      Complementary hulling: for each direction, raise dark speckles in both
      orientations, then lower bright speckles in both orientations.
    */
    for (k=0; k < 4; k++)
    {
      Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer);
      Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer);
    }
    /*
      Copy the filtered plane back out of the padded buffer.
    */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register Quantum
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns,
        1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelChannel(despeckle_image,channel,pixels[j++],q);
        q+=GetPixelChannels(despeckle_image);
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffer_info=RelinquishVirtualMemory(buffer_info);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E d g e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EdgeImage() finds edges in an image.  Radius defines the radius of the
%  convolution filter.  Use a radius of 0 and EdgeImage() selects a suitable
%  radius for you.
%
%  The format of the EdgeImage method is:
%
%      Image *EdgeImage(const Image *image,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EdgeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,0.5);
  /*
    Acquire an empty KernelInfo and fill it in by hand; the structure is
    zeroed first, so only the fields set below are meaningful.
  */
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (kernel_info->width-1)/2;
  kernel_info->y=(ssize_t) (kernel_info->height-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Discrete Laplacian-style edge kernel: every tap is -1 except the center,
    which is width*height-1, so the taps sum to zero (flat areas -> black).
    After the loop i == width*height, so i/2 indexes the center tap.
  */
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]=(-1.0);
  kernel_info->values[i/2]=(double) kernel_info->width*kernel_info->height-1.0;
  edge_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(edge_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E m b o s s I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EmbossImage() returns a grayscale image with a three-dimensional effect.
%  We convolve the image with a Gaussian operator of the given radius and
%  standard deviation (sigma).
For reasonable results, radius should be % larger than sigma. Use a radius of 0 and Emboss() selects a suitable % radius for you. % % The format of the EmbossImage method is: % % Image *EmbossImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the pixel neighborhood. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *EmbossImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { double gamma, normalize; Image *emboss_image; KernelInfo *kernel_info; register ssize_t i; size_t width; ssize_t j, k, u, v; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth1D(radius,sigma); kernel_info=AcquireKernelInfo((const char *) NULL,exception); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); kernel_info->width=width; kernel_info->height=width; kernel_info->x=(ssize_t) (width-1)/2; kernel_info->y=(ssize_t) (width-1)/2; kernel_info->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel_info->width,kernel_info->width* sizeof(*kernel_info->values))); if (kernel_info->values == (MagickRealType *) NULL) { kernel_info=DestroyKernelInfo(kernel_info); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } j=(ssize_t) (kernel_info->width-1)/2; k=j; i=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) { kernel_info->values[i]=(MagickRealType) (((u < 0) || (v < 0) ? 
-8.0 : 8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/ (2.0*MagickPI*MagickSigma*MagickSigma)); if (u != k) kernel_info->values[i]=0.0; i++; } k--; } normalize=0.0; for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++) normalize+=kernel_info->values[i]; gamma=PerceptibleReciprocal(normalize); for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++) kernel_info->values[i]*=gamma; emboss_image=ConvolveImage(image,kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); if (emboss_image != (Image *) NULL) (void) EqualizeImage(emboss_image,exception); return(emboss_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G a u s s i a n B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GaussianBlurImage() blurs an image. We convolve the image with a % Gaussian operator of the given radius and standard deviation (sigma). % For reasonable results, the radius should be larger than sigma. Use a % radius of 0 and GaussianBlurImage() selects a suitable radius for you % % The format of the GaussianBlurImage method is: % % Image *GaussianBlurImage(const Image *image,onst double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *GaussianBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    kernel_geometry[MagickPathExtent];

  Image
    *blurred_image;

  KernelInfo
    *gaussian_kernel;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Build a 2-D Gaussian kernel from the radius/sigma geometry and convolve.
  */
  (void) FormatLocaleString(kernel_geometry,MagickPathExtent,
    "gaussian:%.20gx%.20g",radius,sigma);
  gaussian_kernel=AcquireKernelInfo(kernel_geometry,exception);
  if (gaussian_kernel == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blurred_image=ConvolveImage(image,gaussian_kernel,exception);
  gaussian_kernel=DestroyKernelInfo(gaussian_kernel);
  return(blurred_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     K u w a h a r a I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  KuwaharaImage() is an edge preserving noise reduction filter.
%
%  The format of the KuwaharaImage method is:
%
%      Image *KuwaharaImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the square window radius.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
% */ static inline MagickRealType GetMeanLuma(const Image *magick_restrict image, const double *magick_restrict pixel) { return(0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+ 0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+ 0.072186f*pixel[image->channel_map[BluePixelChannel].offset]); /* Rec709 */ } MagickExport Image *KuwaharaImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { #define KuwaharaImageTag "Kuwahara/Image" CacheView *image_view, *kuwahara_view; Image *gaussian_image, *kuwahara_image; MagickBooleanType status; MagickOffsetType progress; size_t width; ssize_t y; /* Initialize Kuwahara image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=(size_t) radius+1; gaussian_image=BlurImage(image,radius,sigma,exception); if (gaussian_image == (Image *) NULL) return((Image *) NULL); kuwahara_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (kuwahara_image == (Image *) NULL) { gaussian_image=DestroyImage(gaussian_image); return((Image *) NULL); } if (SetImageStorageClass(kuwahara_image,DirectClass,exception) == MagickFalse) { gaussian_image=DestroyImage(gaussian_image); kuwahara_image=DestroyImage(kuwahara_image); return((Image *) NULL); } /* Edge preserving noise reduction filter. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(gaussian_image,exception); kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,kuwahara_image,image->rows,1) #endif for (y=0; y < (ssize_t) gaussian_image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) gaussian_image->columns; x++) { const Quantum *magick_restrict p; double min_variance; RectangleInfo quadrant, target; register size_t i; min_variance=MagickMaximumValue; SetGeometry(gaussian_image,&target); quadrant.width=width; quadrant.height=width; for (i=0; i < 4; i++) { const Quantum *magick_restrict k; double mean[MaxPixelChannels], variance; register ssize_t n; ssize_t j; quadrant.x=x; quadrant.y=y; switch (i) { case 0: { quadrant.x=x-(ssize_t) (width-1); quadrant.y=y-(ssize_t) (width-1); break; } case 1: { quadrant.y=y-(ssize_t) (width-1); break; } case 2: { quadrant.x=x-(ssize_t) (width-1); break; } case 3: default: break; } p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y, quadrant.width,quadrant.height,exception); if (p == (const Quantum *) NULL) break; for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++) mean[j]=0.0; k=p; for (n=0; n < (ssize_t) (width*width); n++) { for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++) mean[j]+=(double) k[j]; k+=GetPixelChannels(gaussian_image); } for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++) mean[j]/=(double) (width*width); k=p; variance=0.0; for (n=0; n < (ssize_t) (width*width); n++) { double luma; luma=GetPixelLuma(gaussian_image,k); variance+=(luma-GetMeanLuma(gaussian_image,mean))* (luma-GetMeanLuma(gaussian_image,mean)); 
k+=GetPixelChannels(gaussian_image); } if (variance < min_variance) { min_variance=variance; target=quadrant; } } if (i < 4) { status=MagickFalse; break; } status=InterpolatePixelChannels(gaussian_image,image_view,kuwahara_image, UndefinedInterpolatePixel,(double) target.x+target.width/2.0,(double) target.y+target.height/2.0,q,exception); q+=GetPixelChannels(kuwahara_image); } if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_KuwaharaImage) #endif proceed=SetImageProgress(image,KuwaharaImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } kuwahara_view=DestroyCacheView(kuwahara_view); image_view=DestroyCacheView(image_view); gaussian_image=DestroyImage(gaussian_image); if (status == MagickFalse) kuwahara_image=DestroyImage(kuwahara_image); return(kuwahara_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L o c a l C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LocalContrastImage() attempts to increase the appearance of large-scale % light-dark transitions. Local contrast enhancement works similarly to % sharpening with an unsharp mask, however the mask is instead created using % an image with a greater blur distance. % % The format of the LocalContrastImage method is: % % Image *LocalContrastImage(const Image *image, const double radius, % const double strength,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian blur, in percentage with 100% % resulting in a blur radius of 20% of largest dimension. % % o strength: the strength of the blur mask in percentage. 
% % o exception: return any errors or warnings in this structure. % */ MagickExport Image *LocalContrastImage(const Image *image,const double radius, const double strength,ExceptionInfo *exception) { #define LocalContrastImageTag "LocalContrast/Image" CacheView *image_view, *contrast_view; float *interImage, *scanLinePixels, totalWeight; Image *contrast_image; MagickBooleanType status; MemoryInfo *scanLinePixels_info, *interImage_info; ssize_t scanLineSize, width; /* Initialize contrast image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception); if (contrast_image != (Image *) NULL) return(contrast_image); #endif contrast_image=CloneImage(image,0,0,MagickTrue,exception); if (contrast_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(contrast_image,DirectClass,exception) == MagickFalse) { contrast_image=DestroyImage(contrast_image); return((Image *) NULL); } image_view=AcquireVirtualCacheView(image,exception); contrast_view=AcquireAuthenticCacheView(contrast_image,exception); scanLineSize=(ssize_t) MagickMax(image->columns,image->rows); width=(ssize_t) scanLineSize*0.002f*fabs(radius); scanLineSize+=(2*width); scanLinePixels_info=AcquireVirtualMemory((size_t) GetOpenMPMaximumThreads()* scanLineSize,sizeof(*scanLinePixels)); if (scanLinePixels_info == (MemoryInfo *) NULL) { contrast_view=DestroyCacheView(contrast_view); image_view=DestroyCacheView(image_view); contrast_image=DestroyImage(contrast_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } scanLinePixels=(float *) GetVirtualMemoryBlob(scanLinePixels_info); /* Create intermediate buffer. 
*/ interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)), sizeof(*interImage)); if (interImage_info == (MemoryInfo *) NULL) { scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info); contrast_view=DestroyCacheView(contrast_view); image_view=DestroyCacheView(image_view); contrast_image=DestroyImage(contrast_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } interImage=(float *) GetVirtualMemoryBlob(interImage_info); totalWeight=(float) ((width+1)*(width+1)); /* Vertical pass. */ status=MagickTrue; { ssize_t x; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,image->columns,1) #endif for (x=0; x < (ssize_t) image->columns; x++) { const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; float *out, *pix, *pixels; register ssize_t y; ssize_t i; if (status == MagickFalse) continue; pixels=scanLinePixels; pixels+=id*scanLineSize; pix=pixels; p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width), exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (y=0; y < (ssize_t) image->rows+(2*width); y++) { *pix++=(float)GetPixelLuma(image,p); p+=image->number_channels; } out=interImage+x+width; for (y=0; y < (ssize_t) image->rows; y++) { float sum, weight; weight=1.0f; sum=0; pix=pixels+y; for (i=0; i < width; i++) { sum+=weight*(*pix++); weight+=1.0f; } for (i=width+1; i < (2*width); i++) { sum+=weight*(*pix++); weight-=1.0f; } /* write to output */ *out=sum/totalWeight; /* mirror into padding */ if (x <= width && x != 0) *(out-(x*2))=*out; if ((x > (ssize_t) image->columns-width-2) && (x != (ssize_t) image->columns-1)) *(out+((image->columns-x-1)*2))=*out; out+=image->columns+(width*2); } } } /* Horizontal pass. 
*/ { ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; float *pix, *pixels; register Quantum *magick_restrict q; register ssize_t x; ssize_t i; if (status == MagickFalse) continue; pixels=scanLinePixels; pixels+=id*scanLineSize; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+ (2*width))*sizeof(float)); for (x=0; x < (ssize_t) image->columns; x++) { float mult, srcVal, sum, weight; weight=1.0f; sum=0; pix=pixels+x; for (i=0; i < width; i++) { sum+=weight*(*pix++); weight+=1.0f; } for (i=width+1; i < (2*width); i++) { sum+=weight*(*pix++); weight-=1.0f; } /* Apply and write */ srcVal=(float) GetPixelLuma(image,p); mult=(srcVal-(sum/totalWeight))*(strength/100.0f); mult=(srcVal+mult)/srcVal; SetPixelRed(contrast_image,ClampToQuantum(GetPixelRed(image,p)*mult), q); SetPixelGreen(contrast_image,ClampToQuantum(GetPixelGreen(image,p)* mult),q); SetPixelBlue(contrast_image,ClampToQuantum(GetPixelBlue(image,p)*mult), q); p+=image->number_channels; q+=contrast_image->number_channels; } if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse) status=MagickFalse; } } scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info); interImage_info=RelinquishVirtualMemory(interImage_info); contrast_view=DestroyCacheView(contrast_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) contrast_image=DestroyImage(contrast_image); return(contrast_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o t i o n B l u r I m 
a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MotionBlurImage() simulates motion blur.  We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).
%  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and MotionBlurImage() selects a suitable radius for you.
%  Angle gives the angle of the blurring motion.
%
%  Andrew Protano contributed this effect.
%
%  The format of the MotionBlurImage method is:
%
%    Image *MotionBlurImage(const Image *image,const double radius,
%      const double sigma,const double angle,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting
%      the center pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o angle: Apply the effect along this angle.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Build the 1-D Gaussian weight table used by MotionBlurImage().  Returns
  NULL on allocation failure; the caller owns the aligned buffer and must
  release it with RelinquishAlignedMemory().
*/
static MagickRealType *GetMotionBlurKernel(const size_t width,
  const double sigma)
{
  MagickRealType
    *kernel,
    normalize;

  register ssize_t
    i;

  /*
    Generate a 1-D convolution kernel.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,sizeof(*kernel)));
  if (kernel == (MagickRealType *) NULL)
    return(kernel);
  normalize=0.0;
  for (i=0; i < (ssize_t) width; i++)
  {
    /* One-sided Gaussian: weights decay from the first tap outward. */
    kernel[i]=(MagickRealType) (exp((-((double) i*i)/(double) (2.0*MagickSigma*
      MagickSigma)))/(MagickSQ2PI*MagickSigma));
    normalize+=kernel[i];
  }
  /* Normalize so the taps sum to 1 and overall brightness is preserved. */
  for (i=0; i < (ssize_t) width; i++)
    kernel[i]/=normalize;
  return(kernel);
}

MagickExport Image *MotionBlurImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
#define BlurImageTag  "Blur/Image"

  CacheView
    *blur_view,
    *image_view,
    *motion_view;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *kernel;

  OffsetInfo
    *offset;

  PointInfo
    point;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=GetMotionBlurKernel(width,sigma);
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset));
  if (offset == (OffsetInfo *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Convert the blur angle into a unit direction and precompute the integer
    pixel step taken by each kernel tap along that direction.
  */
  point.x=(double) width*sin(DegreesToRadians(angle));
  point.y=(double) width*cos(DegreesToRadians(angle));
  for (i=0; i < (ssize_t) width; i++)
  {
    offset[i].x=(ssize_t) ceil((double) (i*point.y)/hypot(point.x,point.y)-0.5);
    offset[i].y=(ssize_t) ceil((double) (i*point.x)/hypot(point.x,point.y)-0.5);
  }
  /*
    Motion blur image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  motion_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const Quantum
          *magick_restrict r;

        register MagickRealType
          *magick_restrict k;

        register ssize_t
          j;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        /* Masked or copy-only channels pass through unblurred. */
        if (((blur_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) <= (QuantumRange/2)))
          {
            SetPixelChannel(blur_image,channel,p[i],q);
            continue;
          }
        k=kernel;
        pixel=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              Unblended path: straight weighted sum along the motion vector.
              NOTE(review): on a pixel-cache failure the loop continues
              without advancing k, so the remaining taps shift by one and the
              partial sum is still written -- confirm this is acceptable
              best-effort behavior.
            */
            for (j=0; j < (ssize_t) width; j++)
            {
              r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+
                offset[j].y,1,1,exception);
              if (r == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              pixel+=(*k)*r[i];
              k++;
            }
            SetPixelChannel(blur_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /* Alpha-blended path: weight each tap by its alpha coverage. */
        alpha=0.0;
        gamma=0.0;
        for (j=0; j < (ssize_t) width; j++)
        {
          r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+offset[j].y,1,
            1,exception);
          if (r == (const Quantum *) NULL)
            {
              status=MagickFalse;
              continue;
            }
          alpha=(double) (QuantumScale*GetPixelAlpha(image,r));
          pixel+=(*k)*alpha*r[i];
          gamma+=(*k)*alpha;
          k++;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(blur_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MotionBlurImage)
#endif
        proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  motion_view=DestroyCacheView(motion_view);
  image_view=DestroyCacheView(image_view);
  kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
  offset=(OffsetInfo *) RelinquishMagickMemory(offset);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P r e v i e w I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PreviewImage() tiles 9 thumbnails of the specified image with an image
%  processing operation applied with varying parameters.  This may be helpful
%  pin-pointing an appropriate parameter for a particular image processing
%  operation.
%
%  The format of the PreviewImages method is:
%
%      Image *PreviewImages(const Image *image,const PreviewType preview,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o preview: the image processing operation.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PreviewImage(const Image *image,const PreviewType preview,
  ExceptionInfo *exception)
{
#define NumberTiles  9
#define PreviewImageTag  "Preview/Image"
#define DefaultPreviewGeometry  "204x204+10+10"

  char
    factor[MagickPathExtent],
    label[MagickPathExtent];

  double
    degrees,
    gamma,
    percentage,
    radius,
    sigma,
    threshold;

  extern const char
    DefaultTileFrame[];

  Image
    *images,
    *montage_image,
    *preview_image,
    *thumbnail;

  ImageInfo
    *preview_info;

  MagickBooleanType
    proceed;

  MontageInfo
    *montage_info;

  QuantizeInfo
    quantize_info;

  RectangleInfo
    geometry;

  register ssize_t
    i,
    x;

  size_t
    colors;

  ssize_t
    y;

  /*
    Open output image file.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Per-tile parameters; each is stepped after every tile below. */
  colors=2;
  degrees=0.0;
  gamma=(-0.2f);
  preview_info=AcquireImageInfo();
  SetGeometry(image,&geometry);
  (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y,
    &geometry.width,&geometry.height);
  images=NewImageList();
  percentage=12.5;
  GetQuantizeInfo(&quantize_info);
  radius=0.0;
  sigma=1.0;
  threshold=0.0;
  x=0;
  y=0;
  for (i=0; i < NumberTiles; i++)
  {
    thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception);
    if (thumbnail == (Image *) NULL)
      break;
    (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL,
      (void *) NULL);
    (void) SetImageProperty(thumbnail,"label",DefaultTileLabel,exception);
    /* The center tile (index 4) is the untouched original for comparison. */
    if (i == (NumberTiles/2))
      {
        (void) QueryColorCompliance("#dfdfdf",AllCompliance,
          &thumbnail->matte_color,exception);
        AppendImageToList(&images,thumbnail);
        continue;
      }
    switch (preview)
    {
      case RotatePreview:
      {
        degrees+=45.0;
        preview_image=RotateImage(thumbnail,degrees,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"rotate %g",degrees);
        break;
      }
      case ShearPreview:
      {
        degrees+=5.0;
        preview_image=ShearImage(thumbnail,degrees,degrees,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"shear %gx%g",degrees,
          2.0*degrees);
        break;
      }
      case RollPreview:
      {
        x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles;
        y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles;
        preview_image=RollImage(thumbnail,x,y,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"roll %+.20gx%+.20g",
          (double) x,(double) y);
        break;
      }
      case HuePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"100,100,%g",2.0*
          percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case SaturationPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"100,%g",2.0*
          percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case BrightnessPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"%g",2.0*percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case GammaPreview:
      default:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        gamma+=0.4f;
        (void) GammaImage(preview_image,gamma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"gamma %g",gamma);
        break;
      }
      case SpiffPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image != (Image *) NULL)
          for (x=0; x < i; x++)
            (void) ContrastImage(preview_image,MagickTrue,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"contrast (%.20g)",
          (double) i+1);
        break;
      }
      case DullPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        for (x=0; x < i; x++)
          (void) ContrastImage(preview_image,MagickFalse,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"+contrast (%.20g)",
          (double) i+1);
        break;
      }
      case GrayscalePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        quantize_info.colorspace=GRAYColorspace;
        (void) QuantizeImage(&quantize_info,preview_image,exception);
        (void) FormatLocaleString(label,MagickPathExtent,
          "-colorspace gray -colors %.20g",(double) colors);
        break;
      }
      case QuantizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        (void) QuantizeImage(&quantize_info,preview_image,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"colors %.20g",
          (double) colors);
        break;
      }
      case DespecklePreview:
      {
        /* Apply despeckle i times by repeatedly replacing the thumbnail. */
        for (x=0; x < (i-1); x++)
        {
          preview_image=DespeckleImage(thumbnail,exception);
          if (preview_image == (Image *) NULL)
            break;
          thumbnail=DestroyImage(thumbnail);
          thumbnail=preview_image;
        }
        preview_image=DespeckleImage(thumbnail,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(label,MagickPathExtent,"despeckle (%.20g)",
          (double) i+1);
        break;
      }
      case ReduceNoisePreview:
      {
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t)
          radius,(size_t) radius,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"noise %g",radius);
        break;
      }
      case AddNoisePreview:
      {
        /*
          NOTE(review): case 4 is absent (3 jumps to 5), so tile 4 falls to
          the default arm, which overwrites thumbnail->magick with "NULL" --
          confirm this matches the intended upstream behavior.
        */
        switch ((int) i)
        {
          case 0:
          {
            (void) CopyMagickString(factor,"uniform",MagickPathExtent);
            break;
          }
          case 1:
          {
            (void) CopyMagickString(factor,"gaussian",MagickPathExtent);
            break;
          }
          case 2:
          {
            (void) CopyMagickString(factor,"multiplicative",MagickPathExtent);
            break;
          }
          case 3:
          {
            (void) CopyMagickString(factor,"impulse",MagickPathExtent);
            break;
          }
          case 5:
          {
            (void) CopyMagickString(factor,"laplacian",MagickPathExtent);
            break;
          }
          case 6:
          {
            (void) CopyMagickString(factor,"Poisson",MagickPathExtent);
            break;
          }
          default:
          {
            (void) CopyMagickString(thumbnail->magick,"NULL",MagickPathExtent);
            break;
          }
        }
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i,
          (size_t) i,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"+noise %s",factor);
        break;
      }
      case SharpenPreview:
      {
        preview_image=SharpenImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"sharpen %gx%g",
          radius,sigma);
        break;
      }
      case BlurPreview:
      {
        preview_image=BlurImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"blur %gx%g",radius,
          sigma);
        break;
      }
      case ThresholdPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        /*
          NOTE(review): BilevelImage is applied to thumbnail, not to the
          preview_image clone that is actually displayed -- looks like a
          copy/paste slip; verify against upstream before changing.
        */
        (void) BilevelImage(thumbnail,(double) (percentage*((double)
          QuantumRange+1.0))/100.0,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"threshold %g",
          (double) (percentage*((double) QuantumRange+1.0))/100.0);
        break;
      }
      case EdgeDetectPreview:
      {
        preview_image=EdgeImage(thumbnail,radius,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"edge %g",radius);
        break;
      }
      case SpreadPreview:
      {
        preview_image=SpreadImage(thumbnail,image->interpolate,radius,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"spread %g",
          radius+0.5);
        break;
      }
      case SolarizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) SolarizeImage(preview_image,(double) QuantumRange*percentage/
          100.0,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"solarize %g",
          (QuantumRange*percentage)/100.0);
        break;
      }
      case ShadePreview:
      {
        degrees+=10.0;
        preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"shade %gx%g",degrees,
          degrees);
        break;
      }
      case RaisePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        geometry.width=(size_t) (2*i+2);
        geometry.height=(size_t) (2*i+2);
        geometry.x=(i-1)/2;
        geometry.y=(i-1)/2;
        (void) RaiseImage(preview_image,&geometry,MagickTrue,exception);
        (void) FormatLocaleString(label,MagickPathExtent,
          "raise %.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double)
          geometry.height,(double) geometry.x,(double) geometry.y);
        break;
      }
      case SegmentPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        threshold+=0.4f;
        (void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold,
          threshold,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"segment %gx%g",
          threshold,threshold);
        break;
      }
      case SwirlPreview:
      {
        preview_image=SwirlImage(thumbnail,degrees,image->interpolate,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"swirl %g",degrees);
        degrees+=45.0;
        break;
      }
      case ImplodePreview:
      {
        degrees+=0.1f;
        preview_image=ImplodeImage(thumbnail,degrees,image->interpolate,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"implode %g",degrees);
        break;
      }
      case WavePreview:
      {
        degrees+=5.0f;
        preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees,
          image->interpolate,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"wave %gx%g",0.5*
          degrees,2.0*degrees);
        break;
      }
      case OilPaintPreview:
      {
        preview_image=OilPaintImage(thumbnail,(double) radius,(double) sigma,
          exception);
        /*
          NOTE(review): label says "charcoal" for the oil-paint preview --
          appears to be a copy/paste of the CharcoalDrawingPreview label;
          confirm before changing the user-visible string.
        */
        (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",
          radius,sigma);
        break;
      }
      case CharcoalDrawingPreview:
      {
        preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",
          radius,sigma);
        break;
      }
      case JPEGPreview:
      {
        char
          filename[MagickPathExtent];

        int
          file;

        MagickBooleanType
          status;

        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        preview_info->quality=(size_t) percentage;
        (void) FormatLocaleString(factor,MagickPathExtent,"%.20g",(double)
          preview_info->quality);
        file=AcquireUniqueFileResource(filename);
        /* close() returns 0 on success, so this leaves file == -1. */
        if (file != -1)
          file=close(file)-1;
        (void) FormatLocaleString(preview_image->filename,MagickPathExtent,
          "jpeg:%s",filename);
        /* Round-trip through a JPEG file to show the quality loss. */
        status=WriteImage(preview_info,preview_image,exception);
        if (status != MagickFalse)
          {
            Image
              *quality_image;

            (void) CopyMagickString(preview_info->filename,
              preview_image->filename,MagickPathExtent);
            quality_image=ReadImage(preview_info,exception);
            if (quality_image != (Image *) NULL)
              {
                preview_image=DestroyImage(preview_image);
                preview_image=quality_image;
              }
          }
        (void) RelinquishUniqueFileResource(preview_image->filename);
        if ((GetBlobSize(preview_image)/1024) >= 1024)
          (void) FormatLocaleString(label,MagickPathExtent,"quality %s\n%gmb ",
            factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/
            1024.0/1024.0);
        else
          if (GetBlobSize(preview_image) >= 1024)
            (void) FormatLocaleString(label,MagickPathExtent,
              "quality %s\n%gkb ",factor,(double) ((MagickOffsetType)
              GetBlobSize(preview_image))/1024.0);
          else
            (void) FormatLocaleString(label,MagickPathExtent,
              "quality %s\n%.20gb ",factor,(double) ((MagickOffsetType)
              GetBlobSize(thumbnail)));
        break;
      }
    }
    thumbnail=DestroyImage(thumbnail);
    /* Step the parameters for the next tile. */
    percentage+=12.5;
    radius+=0.5;
    sigma+=0.25;
    if (preview_image == (Image *) NULL)
      break;
    (void) DeleteImageProperty(preview_image,"label");
    (void) SetImageProperty(preview_image,"label",label,exception);
    AppendImageToList(&images,preview_image);
    proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i,
      NumberTiles);
    if (proceed == MagickFalse)
      break;
  }
  if (images == (Image *) NULL)
    {
      preview_info=DestroyImageInfo(preview_info);
      return((Image *) NULL);
    }
  /*
    Create the montage.
  */
  montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL);
  (void) CopyMagickString(montage_info->filename,image->filename,
    MagickPathExtent);
  montage_info->shadow=MagickTrue;
  (void) CloneString(&montage_info->tile,"3x3");
  (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry);
  (void) CloneString(&montage_info->frame,DefaultTileFrame);
  montage_image=MontageImages(images,montage_info,exception);
  montage_info=DestroyMontageInfo(montage_info);
  images=DestroyImageList(images);
  if (montage_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (montage_image->montage != (char *) NULL)
    {
      /*
        Free image directory.
      */
      montage_image->montage=(char *) RelinquishMagickMemory(
        montage_image->montage);
      if (image->directory != (char *) NULL)
        montage_image->directory=(char *) RelinquishMagickMemory(
          montage_image->directory);
    }
  preview_info=DestroyImageInfo(preview_info);
  return(montage_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     R o t a t i o n a l B l u r I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RotationalBlurImage() applies a radial blur to the image.
%
%  Andrew Protano contributed this effect.
%
%  The format of the RotationalBlurImage method is:
%
%    Image *RotationalBlurImage(const Image *image,const double angle,
%      ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o angle: the angle of the radial blur.
%
%    o blur: the blur.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *RotationalBlurImage(const Image *image,const double angle, ExceptionInfo *exception) { CacheView *blur_view, *image_view, *radial_view; double blur_radius, *cos_theta, offset, *sin_theta, theta; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; PointInfo blur_center; register ssize_t i; size_t n; ssize_t y; /* Allocate blur image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateRotationalBlurImage(image,angle,exception); if (blur_image != (Image *) NULL) return(blur_image); #endif blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { blur_image=DestroyImage(blur_image); return((Image *) NULL); } blur_center.x=(double) (image->columns-1)/2.0; blur_center.y=(double) (image->rows-1)/2.0; blur_radius=hypot(blur_center.x,blur_center.y); n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL); theta=DegreesToRadians(angle)/(double) (n-1); cos_theta=(double *) AcquireQuantumMemory((size_t) n, sizeof(*cos_theta)); sin_theta=(double *) AcquireQuantumMemory((size_t) n, sizeof(*sin_theta)); if ((cos_theta == (double *) NULL) || (sin_theta == (double *) NULL)) { blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } offset=theta*(double) (n-1)/2.0; for (i=0; i < (ssize_t) n; i++) { cos_theta[i]=cos((double) (theta*i-offset)); sin_theta[i]=sin((double) (theta*i-offset)); } /* Radial blur image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); radial_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double radius; PointInfo center; register ssize_t i; size_t step; center.x=(double) x-blur_center.x; center.y=(double) y-blur_center.y; radius=hypot((double) center.x,center.y); if (radius == 0) step=1; else { step=(size_t) (blur_radius/radius); if (step == 0) step=1; else if (step >= n) step=n-1; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const Quantum *magick_restrict r; register ssize_t j; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if (((blur_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p) <= (QuantumRange/2))) { SetPixelChannel(blur_image,channel,p[i],q); continue; } gamma=0.0; pixel=0.0; if ((GetPixelChannelTraits(image,AlphaPixelChannel) == UndefinedPixelTrait) || (channel == AlphaPixelChannel)) { for (j=0; j < (ssize_t) n; j+=(ssize_t) step) { r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+ 
center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t) (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5), 1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } pixel+=r[i]; gamma++; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); continue; } for (j=0; j < (ssize_t) n; j+=(ssize_t) step) { double alpha; r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+ center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t) (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5), 1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } alpha=(double) QuantumScale*GetPixelAlpha(image,r); pixel+=alpha*r[i]; gamma+=alpha; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(blur_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RotationalBlurImage) #endif proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_view=DestroyCacheView(blur_view); radial_view=DestroyCacheView(radial_view); image_view=DestroyCacheView(image_view); cos_theta=(double *) RelinquishMagickMemory(cos_theta); sin_theta=(double *) RelinquishMagickMemory(sin_theta); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e l e c t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SelectiveBlurImage() selectively blur pixels within a contrast threshold. 
% It is similar to the unsharpen mask that sharpens everything with contrast % above a certain threshold. % % The format of the SelectiveBlurImage method is: % % Image *SelectiveBlurImage(const Image *image,const double radius, % const double sigma,const double threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o threshold: only pixels within this contrast threshold are included % in the blur operation. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SelectiveBlurImage(const Image *image,const double radius, const double sigma,const double threshold,ExceptionInfo *exception) { #define SelectiveBlurImageTag "SelectiveBlur/Image" CacheView *blur_view, *image_view, *luminance_view; Image *blur_image, *luminance_image; MagickBooleanType status; MagickOffsetType progress; MagickRealType *kernel; register ssize_t i; size_t width; ssize_t center, j, u, v, y; /* Initialize blur image attributes. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth1D(radius,sigma); kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,width*sizeof(*kernel))); if (kernel == (MagickRealType *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); j=(ssize_t) (width-1)/2; i=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) kernel[i++]=(MagickRealType) (exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); } if (image->debug != MagickFalse) { char format[MagickPathExtent], *message; register const MagickRealType *k; ssize_t u, v; (void) LogMagickEvent(TransformEvent,GetMagickModule(), " SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double) width); message=AcquireString(""); k=kernel; for (v=0; v < (ssize_t) width; v++) { *message='\0'; (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v); (void) ConcatenateString(&message,format); for (u=0; u < (ssize_t) width; u++) { (void) FormatLocaleString(format,MagickPathExtent,"%+f ",(double) *k++); (void) ConcatenateString(&message,format); } (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message); } message=DestroyString(message); } blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { blur_image=DestroyImage(blur_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); return((Image *) NULL); } luminance_image=CloneImage(image,0,0,MagickTrue,exception); if (luminance_image == (Image *) NULL) { blur_image=DestroyImage(blur_image); kernel=(MagickRealType *) 
RelinquishAlignedMemory(kernel); return((Image *) NULL); } status=TransformImageColorspace(luminance_image,GRAYColorspace,exception); if (status == MagickFalse) { luminance_image=DestroyImage(luminance_image); blur_image=DestroyImage(blur_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); return((Image *) NULL); } /* Threshold blur image. */ status=MagickTrue; progress=0; center=(ssize_t) (GetPixelChannels(image)*(image->columns+width)* ((width-1)/2L)+GetPixelChannels(image)*((width-1)/2L)); image_view=AcquireVirtualCacheView(image,exception); luminance_view=AcquireVirtualCacheView(luminance_image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double contrast; MagickBooleanType sync; register const Quantum *magick_restrict l, *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t) ((width-1)/2L),image->columns+width,width,exception); l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y- (ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double intensity; register ssize_t i; intensity=GetPixelIntensity(image,p+center); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const MagickRealType *magick_restrict k; register const Quantum *magick_restrict luminance_pixels, *magick_restrict pixels; register ssize_t u; ssize_t v; 
channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if (((blur_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p+center) <= (QuantumRange/2))) { SetPixelChannel(blur_image,channel,p[center+i],q); continue; } k=kernel; pixel=0.0; pixels=p; luminance_pixels=l; gamma=0.0; if ((blur_traits & BlendPixelTrait) == 0) { for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(luminance_image,luminance_pixels)- intensity; if (fabs(contrast) < threshold) { pixel+=(*k)*pixels[i]; gamma+=(*k); } k++; pixels+=GetPixelChannels(image); luminance_pixels+=GetPixelChannels(luminance_image); } pixels+=GetPixelChannels(image)*image->columns; luminance_pixels+=GetPixelChannels(luminance_image)* luminance_image->columns; } if (fabs((double) gamma) < MagickEpsilon) { SetPixelChannel(blur_image,channel,p[center+i],q); continue; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); continue; } for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(image,pixels)-intensity; if (fabs(contrast) < threshold) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=(*k)*alpha*pixels[i]; gamma+=(*k)*alpha; } k++; pixels+=GetPixelChannels(image); luminance_pixels+=GetPixelChannels(luminance_image); } pixels+=GetPixelChannels(image)*image->columns; luminance_pixels+=GetPixelChannels(luminance_image)* luminance_image->columns; } if (fabs((double) gamma) < MagickEpsilon) { SetPixelChannel(blur_image,channel,p[center+i],q); continue; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); l+=GetPixelChannels(luminance_image); q+=GetPixelChannels(blur_image); } 
sync=SyncCacheViewAuthenticPixels(blur_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SelectiveBlurImage) #endif proceed=SetImageProgress(image,SelectiveBlurImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_image->type=image->type; blur_view=DestroyCacheView(blur_view); image_view=DestroyCacheView(image_view); luminance_image=DestroyImage(luminance_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a d e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShadeImage() shines a distant light on an image to create a % three-dimensional effect. You control the positioning of the light with % azimuth and elevation; azimuth is measured in degrees off the x axis % and elevation is measured in pixels above the Z axis. % % The format of the ShadeImage method is: % % Image *ShadeImage(const Image *image,const MagickBooleanType gray, % const double azimuth,const double elevation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o gray: A value other than zero shades the intensity of each pixel. % % o azimuth, elevation: Define the light source direction. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray,
  const double azimuth,const double elevation,ExceptionInfo *exception)
{
#define ShadeImageTag  "Shade/Image"

  CacheView
    *image_view,
    *shade_view;

  Image
    *linear_image,
    *shade_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    light;

  ssize_t
    y;

  /*
    Initialize shaded image attributes.

    NOTE(review): despite the name, linear_image is a plain clone here — no
    colorspace transform is applied in this view; confirm whether callers
    expect linear-light input.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  shade_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (shade_image != (Image *) NULL)
        shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(shade_image,DirectClass,exception) == MagickFalse)
    {
      linear_image=DestroyImage(linear_image);
      shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  /*
    Compute the light vector from spherical coordinates (azimuth/elevation in
    degrees), scaled to the quantum range.
  */
  light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
  /*
    Shade image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(linear_image,exception);
  shade_view=AcquireAuthenticCacheView(shade_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(linear_image,shade_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    double
      distance,
      normal_distance,
      shade;

    PrimaryInfo
      normal;

    register const Quantum
      *magick_restrict center,
      *magick_restrict p,
      *magick_restrict post,
      *magick_restrict pre;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Read a 3-row window (y-1..y+1) with a 1-pixel horizontal apron so the
      3x3 neighborhood is available for every column.
    */
    p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3,
      exception);
    q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Shade this row of pixels.
    */
    normal.z=2.0*(double) QuantumRange;  /* constant Z of surface normal */
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Determine the surface normal and compute shading.  pre/center/post are
        the rows above/at/below the current pixel inside the 3-row window.
      */
      pre=p+GetPixelChannels(linear_image);
      center=pre+(linear_image->columns+2)*GetPixelChannels(linear_image);
      post=center+(linear_image->columns+2)*GetPixelChannels(linear_image);
      normal.x=(double) (
        GetPixelIntensity(linear_image,pre-GetPixelChannels(linear_image))+
        GetPixelIntensity(linear_image,center-GetPixelChannels(linear_image))+
        GetPixelIntensity(linear_image,post-GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,pre+GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,center+GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,post+GetPixelChannels(linear_image)));
      normal.y=(double) (
        GetPixelIntensity(linear_image,post-GetPixelChannels(linear_image))+
        GetPixelIntensity(linear_image,post)+
        GetPixelIntensity(linear_image,post+GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,pre-GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,pre)-
        GetPixelIntensity(linear_image,pre+GetPixelChannels(linear_image)));
      if ((fabs(normal.x) <= MagickEpsilon) &&
          (fabs(normal.y) <= MagickEpsilon))
        shade=light.z;  /* flat surface: full incidence of the light vector */
      else
        {
          shade=0.0;
          distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
          if (distance > MagickEpsilon)
            {
              normal_distance=normal.x*normal.x+normal.y*normal.y+
                normal.z*normal.z;
              if (normal_distance > (MagickEpsilon*MagickEpsilon))
                shade=distance/sqrt((double) normal_distance);
            }
        }
      for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          shade_traits,
          traits;

        channel=GetPixelChannelChannel(linear_image,i);
        traits=GetPixelChannelTraits(linear_image,channel);
        shade_traits=GetPixelChannelTraits(shade_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (shade_traits == UndefinedPixelTrait))
          continue;
        /* copy-through channels and write-masked pixels pass unchanged */
        if (((shade_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(linear_image,center) <= (QuantumRange/2)))
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if ((traits & UpdatePixelTrait) == 0)
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if (gray != MagickFalse)
          {
            /* gray: replace the channel with the shade intensity itself */
            SetPixelChannel(shade_image,channel,ClampToQuantum(shade),q);
            continue;
          }
        SetPixelChannel(shade_image,channel,ClampToQuantum(QuantumScale*shade*
          center[i]),q);
      }
      p+=GetPixelChannels(linear_image);
      q+=GetPixelChannels(shade_image);
    }
    if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ShadeImage)
#endif
        proceed=SetImageProgress(image,ShadeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  shade_view=DestroyCacheView(shade_view);
  image_view=DestroyCacheView(image_view);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    shade_image=DestroyImage(shade_image);
  return(shade_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a r p e n I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SharpenImage() sharpens the image.  We convolve the image with a Gaussian
%  operator of the given radius and standard deviation (sigma).  For
%  reasonable results, radius should be larger than sigma.  Use a radius of 0
%  and SharpenImage() selects a suitable radius for you.
%
%  Using a separable kernel would be faster, but the negative weights cancel
%  out on the corners of the kernel producing often undesirable ringing in the
%  filtered result; this can be avoided by using a 2D gaussian shaped image
%  sharpening kernel instead.
%
%  The format of the SharpenImage method is:
%
%    Image *SharpenImage(const Image *image,const double radius,
%      const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *sharp_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /* zero the struct, then rebuild only the fields the kernel needs */
  (void) ResetMagickMemory(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Fill a negated 2D Gaussian; the center tap is then set to -2x the sum so
    the kernel acts as a sharpening (difference) operator.
  */
  normalize=0.0;
  j=(ssize_t) (kernel_info->width-1)/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (-exp(-((double) u*u+v*v)/(2.0*
        MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
      normalize+=kernel_info->values[i];
      i++;
    }
  }
  /* i == width*height (odd) here, so i/2 indexes the center tap */
  kernel_info->values[i/2]=(double) ((-2.0)*normalize);
  /* normalize the kernel so its taps sum to 1 */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  sharp_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S p r e a d I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SpreadImage() is a special effects method that randomly displaces each
%  pixel in a square area defined by the radius parameter.
%
%  The format of the SpreadImage method is:
%
%      Image *SpreadImage(const Image *image,
%        const PixelInterpolateMethod method,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o method: intepolation method.
%
%    o radius: choose a random pixel in a neighborhood of this extent.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpreadImage(const Image *image,
  const PixelInterpolateMethod method,const double radius,
  ExceptionInfo *exception)
{
#define SpreadImageTag  "Spread/Image"

  CacheView
    *image_view,
    *spread_view;

  Image
    *spread_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  size_t
    width;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize spread image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  spread_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (spread_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(spread_image,DirectClass,exception) == MagickFalse)
    {
      spread_image=DestroyImage(spread_image);
      return((Image *) NULL);
    }
  /*
    Spread image.
  */
  status=MagickTrue;
  progress=0;
  width=GetOptimalKernelWidth1D(radius,0.5);
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireVirtualCacheView(image,exception);
  spread_view=AcquireAuthenticCacheView(spread_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    Only parallelize when a reproducible seed is NOT requested (key == ~0UL);
    otherwise the per-thread RNG order would change the result.
  */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,spread_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PointInfo
        point;

      /* sample a random offset in [-width/2,width/2) around (x,y) */
      point.x=GetPseudoRandomValue(random_info[id]);
      point.y=GetPseudoRandomValue(random_info[id]);
      status=InterpolatePixelChannels(image,image_view,spread_image,method,
        (double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),q,
        exception);
      q+=GetPixelChannels(spread_image);
    }
    if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SpreadImage)
#endif
        proceed=SetImageProgress(image,SpreadImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  spread_view=DestroyCacheView(spread_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    spread_image=DestroyImage(spread_image);
  return(spread_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     U n s h a r p M a s k I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnsharpMaskImage() sharpens one or more image channels.  We convolve the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma).  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and UnsharpMaskImage() selects a suitable radius for you.
%
%  The format of the UnsharpMaskImage method is:
%
%    Image *UnsharpMaskImage(const Image *image,const double radius,
%      const double sigma,const double amount,const double threshold,
%      ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o gain: the percentage of the difference between the original and the
%      blur image that is added back into the original.
%
%    o threshold: the threshold in pixels needed to apply the difference gain.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *UnsharpMaskImage(const Image *image,const double radius,
  const double sigma,const double gain,const double threshold,
  ExceptionInfo *exception)
{
#define SharpenImageTag  "Sharpen/Image"

  CacheView
    *image_view,
    *unsharp_view;

  Image
    *unsharp_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    quantum_threshold;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* try the OpenCL-accelerated path first; fall through on failure */
  unsharp_image=AccelerateUnsharpMaskImage(image,radius,sigma,gain,threshold,
    exception);
  if (unsharp_image != (Image *) NULL)
    return(unsharp_image);
#endif
  unsharp_image=BlurImage(image,radius,sigma,exception);
  if (unsharp_image == (Image *) NULL)
    return((Image *) NULL);
  quantum_threshold=(double) QuantumRange*threshold;
  /*
    Unsharp-mask image: q initially holds the blurred copy; each channel is
    replaced by the original plus a gain-scaled original-minus-blur delta
    when the delta exceeds the threshold.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,unsharp_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits,
          unsharp_traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        unsharp_traits=GetPixelChannelTraits(unsharp_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (unsharp_traits == UndefinedPixelTrait))
          continue;
        /* copy-through channels and write-masked pixels keep original */
        if (((unsharp_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) <= (QuantumRange/2)))
          {
            SetPixelChannel(unsharp_image,channel,p[i],q);
            continue;
          }
        pixel=p[i]-(double) GetPixelChannel(unsharp_image,channel,q);
        if (fabs(2.0*pixel) < quantum_threshold)
          pixel=(double) p[i];
        else
          pixel=(double) p[i]+gain*pixel;
        SetPixelChannel(unsharp_image,channel,ClampToQuantum(pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(unsharp_image);
    }
    if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_UnsharpMaskImage)
#endif
        proceed=SetImageProgress(image,SharpenImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  unsharp_image->type=image->type;
  unsharp_view=DestroyCacheView(unsharp_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    unsharp_image=DestroyImage(unsharp_image);
  return(unsharp_image);
}
SpatialConvolutionMap.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialConvolutionMap.c"
#else

/*
  Forward pass of a map-connected 2D convolution (Torch7 nn binding).
  Lua stack: 1 = module table, 2 = 3D input tensor (nInputPlane x H x W).
  connTable is an nkernel x 2 table of 1-based (input, output) plane pairs;
  kernel k connects input plane connTable[k][1] to output plane
  connTable[k][2].  Returns 1 (the output tensor is left on the module).
*/
static int nn_(SpatialConvolutionMap_updateOutput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_(Tensor_id));
  int kW = luaT_getfieldcheckint(L, 1, "kW");
  int kH = luaT_getfieldcheckint(L, 1, "kH");
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  int nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
  int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
  THTensor *connTable = luaT_getfieldcheckudata(L, 1, "connTable", torch_(Tensor_id));
  THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_(Tensor_id));
  THTensor *bias = luaT_getfieldcheckudata(L, 1, "bias", torch_(Tensor_id));
  THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_(Tensor_id));

  luaL_argcheck(L, input->nDimension == 3, 2, "3D tensor expected");
  luaL_argcheck(L, input->size[0] == nInputPlane, 2, "invalid number of input planes");
  luaL_argcheck(L, input->size[2] >= kW && input->size[1] >= kH, 2, "input image smaller than kernel size");

  /* valid convolution output size: (in - k)/stride + 1 */
  THTensor_(resize3d)(output, nOutputPlane,
                      (input->size[1] - kH) / dH + 1,
                      (input->size[2] - kW) / dW + 1);

  /* contiguous
     NOTE(review): if `output` were non-contiguous, newContiguous makes a
     copy; results would then be written to the copy which is freed below.
     Presumably `output` is always contiguous after resize3d — confirm. */
  input = THTensor_(newContiguous)(input);
  output = THTensor_(newContiguous)(output);

  /* get raw pointers */
  real *input_data = THTensor_(data)(input);
  real *output_data = THTensor_(data)(output);
  real *weight_data = THTensor_(data)(weight);
  real *bias_data = THTensor_(data)(bias);
  real *connTable_data = THTensor_(data)(connTable);

  /* and dims */
  long input_h = input->size[1];
  long input_w = input->size[2];
  long output_h = output->size[1];
  long output_w = output->size[2];
  long weight_h = weight->size[1];
  long weight_w = weight->size[2];

  long p;
#pragma omp parallel for private(p)
  for (p = 0; p < nOutputPlane; p++)
  {
    /* add bias */
    real *ptr_output = output_data + p*output_w*output_h;
    long j;
    for(j = 0; j < output_h*output_w; j++)
      ptr_output[j] = bias_data[p];

    /* convolve all maps; the `o == p` guard keeps each thread writing only
       its own output plane (race-free under the parallel-for over p) */
    int nweight = connTable->size[0];
    long k;
    for (k = 0; k < nweight; k++)
    {
      /* get offsets for input/output (connTable entries are 1-based) */
      int o = (int)connTable_data[k*2+1]-1;
      int i = (int)connTable_data[k*2+0]-1;
      if (o == p)
      {
        THTensor_(validXCorr2Dptr)(output_data + o*output_w*output_h,
                                   1.0,
                                   input_data + i*input_w*input_h, input_h, input_w,
                                   weight_data + k*weight_w*weight_h, weight_h, weight_w,
                                   dH, dW);
      }
    }
  }

  /* clean up (release the newContiguous references) */
  THTensor_(free)(input);
  THTensor_(free)(output);

  return 1;
}

/*
  Backward pass w.r.t. the input.  Lua stack: 1 = module table, 2 = input,
  3 = gradOutput.  gradInput is resized to match input, zeroed, and
  accumulated via full 2D convolutions; parallelized over input planes with
  an `i == p` guard so each thread writes only its own gradInput plane.
*/
static int nn_(SpatialConvolutionMap_updateGradInput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_(Tensor_id));
  THTensor *gradOutput = luaT_checkudata(L, 3, torch_(Tensor_id));
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  int nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
  THTensor *connTable = luaT_getfieldcheckudata(L, 1, "connTable", torch_(Tensor_id));
  THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_(Tensor_id));
  THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_(Tensor_id));

  /* contiguous
     NOTE(review): same free-after-write caveat as updateOutput applies to
     gradInput if it were non-contiguous — confirm it never is here. */
  gradInput = THTensor_(newContiguous)(gradInput);
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* Resize/Zero */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  /* get raw pointers */
  real *gradInput_data = THTensor_(data)(gradInput);
  real *gradOutput_data = THTensor_(data)(gradOutput);
  real *weight_data = THTensor_(data)(weight);
  real *connTable_data = THTensor_(data)(connTable);

  /* and dims */
  long input_h = input->size[1];
  long input_w = input->size[2];
  long output_h = gradOutput->size[1];
  long output_w = gradOutput->size[2];
  long weight_h = weight->size[1];
  long weight_w = weight->size[2];

  long p;
#pragma omp parallel for private(p)
  for(p = 0; p < nInputPlane; p++)
  {
    long k;
    /* backward all */
    int nkernel = connTable->size[0];
    for(k = 0; k < nkernel; k++)
    {
      int o = (int)connTable_data[k*2+1]-1;
      int i = (int)connTable_data[k*2+0]-1;
      if (i == p)
      {
        /* gradient to input */
        THTensor_(fullConv2Dptr)(gradInput_data + i*input_w*input_h,
                                 1.0,
                                 gradOutput_data + o*output_w*output_h, output_h, output_w,
                                 weight_data + k*weight_w*weight_h, weight_h, weight_w,
                                 dH, dW);
      }
    }
  }

  /* clean up */
  THTensor_(free)(gradInput);
  THTensor_(free)(gradOutput);
  return 1;
}

/*
  Accumulates gradients w.r.t. weights and biases, scaled by `scale`
  (optional 4th Lua argument, default 1).  Bias gradients parallelize over
  output planes; weight gradients parallelize over kernels — each kernel k
  owns a distinct gradWeight slice, so writes are race-free.
*/
static int nn_(SpatialConvolutionMap_accGradParameters)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_(Tensor_id));
  THTensor *gradOutput = luaT_checkudata(L, 3, torch_(Tensor_id));
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
  real scale = luaL_optnumber(L, 4, 1);
  THTensor *connTable = luaT_getfieldcheckudata(L, 1, "connTable", torch_(Tensor_id));
  THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_(Tensor_id));
  THTensor *gradWeight = luaT_getfieldcheckudata(L, 1, "gradWeight", torch_(Tensor_id));
  THTensor *gradBias = luaT_getfieldcheckudata(L, 1, "gradBias", torch_(Tensor_id));

  /* contiguous */
  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* get raw pointers */
  real *input_data = THTensor_(data)(input);
  real *gradOutput_data = THTensor_(data)(gradOutput);
  real *gradWeight_data = THTensor_(data)(gradWeight);
  real *gradBias_data = THTensor_(data)(gradBias);

  /* and dims */
  long input_h = input->size[1];
  long input_w = input->size[2];
  long output_h = gradOutput->size[1];
  long output_w = gradOutput->size[2];
  long weight_h = weight->size[1];
  long weight_w = weight->size[2];

  /* gradients wrt bias */
  long k;
#pragma omp parallel for private(k)
  for(k = 0; k < nOutputPlane; k++)
  {
    real *ptr_gradOutput = gradOutput_data + k*output_w*output_h;
    long l;
    for(l = 0; l < output_h*output_w; l++)
      gradBias_data[k] += scale*ptr_gradOutput[l];
  }

  /* gradients wrt weight */
  int nkernel = connTable->size[0];
#pragma omp parallel for private(k)
  for(k = 0; k < nkernel; k++)
  {
    int o = (int)THTensor_(get2d)(connTable,k,1)-1;
    int i = (int)THTensor_(get2d)(connTable,k,0)-1;

    /* gradient to kernel */
    THTensor_(validXCorr2DRevptr)(gradWeight_data + k*weight_w*weight_h,
                                  scale,
                                  input_data + i*input_w*input_h, input_h, input_w,
                                  gradOutput_data + o*output_w*output_h, output_h, output_w,
                                  dH, dW);
  }

  /* clean up */
  THTensor_(free)(input);
  THTensor_(free)(gradOutput);
  return 0;
}

/* Lua method table registered under the "nn" name on the tensor metaclass. */
static const struct luaL_Reg nn_(SpatialConvolutionMap__) [] = {
  {"SpatialConvolutionMap_updateOutput", nn_(SpatialConvolutionMap_updateOutput)},
  {"SpatialConvolutionMap_updateGradInput", nn_(SpatialConvolutionMap_updateGradInput)},
  {"SpatialConvolutionMap_accGradParameters", nn_(SpatialConvolutionMap_accGradParameters)},
  {NULL, NULL}
};

/* Registers the methods above for the current tensor type. */
static void nn_(SpatialConvolutionMap_init)(lua_State *L)
{
  luaT_pushmetaclass(L, torch_(Tensor_id));
  luaT_registeratname(L, nn_(SpatialConvolutionMap__), "nn");
  lua_pop(L,1);
}

#endif
utils.h
/**********
    C++ Routines for Linear Algebra Operations.

    Copyright (C) 2020-2021 Chunlin Li

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <https://www.gnu.org/licenses/>.

**********/

#ifndef _UTILS_H_
#define _UTILS_H_

#include <immintrin.h>

//#include "omp.h"

// Sum of [first1, last1) added to init.
inline double accumulate(const double *first1, const double *last1,
                         double init) {
  for (; first1 != last1; ++first1) {
    init += *first1;
  }
  return init;
}

// Weighted inner product: init + sum_i first1[i] * first2[i] * weight[i].
inline double inner_product(const double *first1, const double *last1,
                            const double *first2, const double *weight,
                            double init) {
  for (; first1 != last1; ++first1, ++first2, ++weight) {
    init += *first1 * *first2 * *weight;
  }
  return init;
}

// Plain inner product: init + sum_i first1[i] * first2[i].
inline double inner_product(const double *first1, const double *last1,
                            const double *first2, double init) {
  for (; first1 != last1; ++first1, ++first2) {
    init += *first1 * *first2;
  }
  return init;
}

// Inner product with stride: init + sum_i first1[i*stride1] * first2[i*stride2].
// BUGFIX: the second pointer was advanced by stride1 instead of stride2,
// which produced wrong results whenever the two strides differed.
inline double inner_product(const double *first1, const double *first2,
                            const int stride1, const int stride2, const int len,
                            double init) {
  for (int i = 0; i != len; ++i, first1 += stride1, first2 += stride2) {
    init += *first1 * *first2;
  }
  return init;
}

// Sparse inner product over the nonzero positions listed in index1.
// NOTE(review): sparse1 is indexed by the full position j = index1[i] while
// dense2 is indexed by the compressed counter i — this only makes sense if
// dense2 stores values already aligned with index1. Confirm against callers.
inline double inner_product_sparse(const double *sparse1, const double *dense2,
                                   const int *index1, const int len_index1,
                                   const int stride2, double init) {
  for (int i = 0; i != len_index1; ++i) {
    int j = index1[i];
    init += sparse1[j] * dense2[i * stride2];
  }
  return init;
}

// AVX/FMA inner product, 8x4-wide unrolled with a 4-wide and scalar tail.
inline double inner_product_simd(const double *array1, const double *array2,
                                 const int length, double init) {
  __m256d vsum = _mm256_setzero_pd();
  int i = 0;
  if (length >= 8 * 4) {
    // Eight independent accumulators hide the FMA latency.
    __m256d vsum1 = _mm256_setzero_pd();
    __m256d vsum2 = _mm256_setzero_pd();
    __m256d vsum3 = _mm256_setzero_pd();
    __m256d vsum4 = _mm256_setzero_pd();
    __m256d vsum5 = _mm256_setzero_pd();
    __m256d vsum6 = _mm256_setzero_pd();
    __m256d vsum7 = _mm256_setzero_pd();
    __m256d vsum8 = _mm256_setzero_pd();
    for (; i + 8 * 4 - 1 < length; i += 8 * 4) {
      // could unroll further
      vsum1 = _mm256_fmadd_pd(_mm256_loadu_pd(array1 + i),
                              _mm256_loadu_pd(array2 + i), vsum1);
      vsum2 = _mm256_fmadd_pd(_mm256_loadu_pd(array1 + i + 4),
                              _mm256_loadu_pd(array2 + i + 4), vsum2);
      vsum3 = _mm256_fmadd_pd(_mm256_loadu_pd(array1 + i + 8),
                              _mm256_loadu_pd(array2 + i + 8), vsum3);
      vsum4 = _mm256_fmadd_pd(_mm256_loadu_pd(array1 + i + 12),
                              _mm256_loadu_pd(array2 + i + 12), vsum4);
      vsum5 = _mm256_fmadd_pd(_mm256_loadu_pd(array1 + i + 16),
                              _mm256_loadu_pd(array2 + i + 16), vsum5);
      vsum6 = _mm256_fmadd_pd(_mm256_loadu_pd(array1 + i + 20),
                              _mm256_loadu_pd(array2 + i + 20), vsum6);
      vsum7 = _mm256_fmadd_pd(_mm256_loadu_pd(array1 + i + 24),
                              _mm256_loadu_pd(array2 + i + 24), vsum7);
      vsum8 = _mm256_fmadd_pd(_mm256_loadu_pd(array1 + i + 28),
                              _mm256_loadu_pd(array2 + i + 28), vsum8);
    }
    // pairwise reduction of the eight accumulators
    vsum1 = _mm256_add_pd(vsum1, vsum2);
    vsum3 = _mm256_add_pd(vsum3, vsum4);
    vsum5 = _mm256_add_pd(vsum5, vsum6);
    vsum7 = _mm256_add_pd(vsum7, vsum8);
    vsum1 = _mm256_add_pd(vsum1, vsum3);
    vsum5 = _mm256_add_pd(vsum5, vsum7);
    vsum = _mm256_add_pd(vsum1, vsum5);
  }
  for (; i + 3 < length; i += 4) {
    // could unroll further
    vsum = _mm256_fmadd_pd(_mm256_loadu_pd(array1 + i),
                           _mm256_loadu_pd(array2 + i), vsum);
  }
  double buffer[4];
  _mm256_storeu_pd(buffer, vsum);
  init += buffer[0] + buffer[1] + buffer[2] + buffer[3];
  // scalar tail (fewer than 4 remaining elements)
  for (; i < length; ++i) {
    init += array1[i] * array2[i];
  }
  return init;
}

// AVX/FMA triple (weighted) inner product: sum_i a1[i] * a2[i] * a3[i].
inline double inner_product_simd(const double *array1, const double *array2,
                                 const double *array3, const int length,
                                 double init) {
  __m256d vsum = _mm256_setzero_pd();
  int i = 0;
  if (length >= 8 * 4) {
    __m256d vsum1 = _mm256_setzero_pd();
    __m256d vsum2 = _mm256_setzero_pd();
    __m256d vsum3 = _mm256_setzero_pd();
    __m256d vsum4 = _mm256_setzero_pd();
    __m256d vsum5 = _mm256_setzero_pd();
    __m256d vsum6 = _mm256_setzero_pd();
    __m256d vsum7 = _mm256_setzero_pd();
    __m256d vsum8 = _mm256_setzero_pd();
    for (; i + 8 * 4 - 1 < length; i += 8 * 4) {
      // could unroll further
      vsum1 = _mm256_fmadd_pd(_mm256_mul_pd(_mm256_loadu_pd(array1 + i),
                                            _mm256_loadu_pd(array2 + i)),
                              _mm256_loadu_pd(array3 + i), vsum1);
      vsum2 = _mm256_fmadd_pd(_mm256_mul_pd(_mm256_loadu_pd(array1 + i + 4),
                                            _mm256_loadu_pd(array2 + i + 4)),
                              _mm256_loadu_pd(array3 + i + 4), vsum2);
      vsum3 = _mm256_fmadd_pd(_mm256_mul_pd(_mm256_loadu_pd(array1 + i + 8),
                                            _mm256_loadu_pd(array2 + i + 8)),
                              _mm256_loadu_pd(array3 + i + 8), vsum3);
      vsum4 = _mm256_fmadd_pd(_mm256_mul_pd(_mm256_loadu_pd(array1 + i + 12),
                                            _mm256_loadu_pd(array2 + i + 12)),
                              _mm256_loadu_pd(array3 + i + 12), vsum4);
      vsum5 = _mm256_fmadd_pd(_mm256_mul_pd(_mm256_loadu_pd(array1 + i + 16),
                                            _mm256_loadu_pd(array2 + i + 16)),
                              _mm256_loadu_pd(array3 + i + 16), vsum5);
      vsum6 = _mm256_fmadd_pd(_mm256_mul_pd(_mm256_loadu_pd(array1 + i + 20),
                                            _mm256_loadu_pd(array2 + i + 20)),
                              _mm256_loadu_pd(array3 + i + 20), vsum6);
      vsum7 = _mm256_fmadd_pd(_mm256_mul_pd(_mm256_loadu_pd(array1 + i + 24),
                                            _mm256_loadu_pd(array2 + i + 24)),
                              _mm256_loadu_pd(array3 + i + 24), vsum7);
      vsum8 = _mm256_fmadd_pd(_mm256_mul_pd(_mm256_loadu_pd(array1 + i + 28),
                                            _mm256_loadu_pd(array2 + i + 28)),
                              _mm256_loadu_pd(array3 + i + 28), vsum8);
    }
    vsum1 = _mm256_add_pd(vsum1, vsum2);
    vsum3 = _mm256_add_pd(vsum3, vsum4);
    vsum5 = _mm256_add_pd(vsum5, vsum6);
    vsum7 = _mm256_add_pd(vsum7, vsum8);
    vsum1 = _mm256_add_pd(vsum1, vsum3);
    vsum5 = _mm256_add_pd(vsum5, vsum7);
    vsum = _mm256_add_pd(vsum1, vsum5);
  }
  for (; i + 3 < length; i += 4) {
    // could unroll further
    vsum = _mm256_fmadd_pd(_mm256_mul_pd(_mm256_loadu_pd(array1 + i),
                                         _mm256_loadu_pd(array2 + i)),
                           _mm256_loadu_pd(array3 + i), vsum);
  }
  double buffer[4];
  _mm256_storeu_pd(buffer, vsum);
  init += buffer[0] + buffer[1] + buffer[2] + buffer[3];
  for (; i < length; ++i) {
    init += array1[i] * array2[i] * array3[i];
  }
  return init;
}

// vector addition: dest = scalar1 * vector1 + scalar2 * vector2
inline void vec_add(const double *first1, const double *first2,
                    const double scalar1, const double scalar2, const int len,
                    double *dest) {
  //#pragma omp simd
  for (int i = 0; i != len; ++i) {
    dest[i] = scalar1 * first1[i] + scalar2 * first2[i];
  }
}

// affine map: dest = scalar1 * vector1 + scalar2 (scalar broadcast)
inline void vec_add(const double *first1, const double scalar1,
                    const double scalar2, const int len, double *dest) {
  //#pragma omp simd
  for (int i = 0; i != len; ++i) {
    dest[i] = scalar1 * first1[i] + scalar2;
  }
}

// AVX/FMA version of dest = scalar1 * array1 + scalar2.
inline void vec_add_simd(const double *array1, const double scalar1,
                         const double scalar2, const int length, double *dest) {
  __m256d a = _mm256_set_pd(scalar1, scalar1, scalar1, scalar1);
  __m256d b = _mm256_set_pd(scalar2, scalar2, scalar2, scalar2);
  int i = 0;
  if (length >= 8 * 4) {
    for (; i + 8 * 4 - 1 < length; i += 8 * 4) {
      _mm256_storeu_pd(dest + i,
                       _mm256_fmadd_pd(_mm256_loadu_pd(array1 + i), a, b));
      _mm256_storeu_pd(dest + i + 4,
                       _mm256_fmadd_pd(_mm256_loadu_pd(array1 + i + 4), a, b));
      _mm256_storeu_pd(dest + i + 8,
                       _mm256_fmadd_pd(_mm256_loadu_pd(array1 + i + 8), a, b));
      _mm256_storeu_pd(dest + i + 12,
                       _mm256_fmadd_pd(_mm256_loadu_pd(array1 + i + 12), a, b));
      _mm256_storeu_pd(dest + i + 16,
                       _mm256_fmadd_pd(_mm256_loadu_pd(array1 + i + 16), a, b));
      _mm256_storeu_pd(dest + i + 20,
                       _mm256_fmadd_pd(_mm256_loadu_pd(array1 + i + 20), a, b));
      _mm256_storeu_pd(dest + i + 24,
                       _mm256_fmadd_pd(_mm256_loadu_pd(array1 + i + 24), a, b));
      _mm256_storeu_pd(dest + i + 28,
                       _mm256_fmadd_pd(_mm256_loadu_pd(array1 + i + 28), a, b));
    }
  }
  for (; i + 3 < length; i += 4) {
    _mm256_storeu_pd(dest + i,
                     _mm256_fmadd_pd(_mm256_loadu_pd(array1 + i), a, b));
  }
  for (; i < length; ++i) {
    dest[i] = scalar1 * array1[i] + scalar2;
  }
}

// AVX/FMA version of dest = scalar1 * array1 + scalar2 * array2.
inline void vec_add_simd(const double *array1, const double *array2,
                         const double scalar1, const double scalar2,
                         const int length, double *dest) {
  __m256d a = _mm256_set_pd(scalar1, scalar1, scalar1, scalar1);
  __m256d b = _mm256_set_pd(scalar2, scalar2, scalar2, scalar2);
  int i = 0;
  if (length >= 8 * 4) {
    for (; i + 8 * 4 - 1 < length; i += 8 * 4) {
      _mm256_storeu_pd(
          dest + i,
          _mm256_fmadd_pd(_mm256_loadu_pd(array1 + i), a,
                          _mm256_mul_pd(_mm256_loadu_pd(array2 + i), b)));
      _mm256_storeu_pd(
          dest + i + 4,
          _mm256_fmadd_pd(_mm256_loadu_pd(array1 + i + 4), a,
                          _mm256_mul_pd(_mm256_loadu_pd(array2 + i + 4), b)));
      _mm256_storeu_pd(
          dest + i + 8,
          _mm256_fmadd_pd(_mm256_loadu_pd(array1 + i + 8), a,
                          _mm256_mul_pd(_mm256_loadu_pd(array2 + i + 8), b)));
      _mm256_storeu_pd(
          dest + i + 12,
          _mm256_fmadd_pd(_mm256_loadu_pd(array1 + i + 12), a,
                          _mm256_mul_pd(_mm256_loadu_pd(array2 + i + 12), b)));
      _mm256_storeu_pd(
          dest + i + 16,
          _mm256_fmadd_pd(_mm256_loadu_pd(array1 + i + 16), a,
                          _mm256_mul_pd(_mm256_loadu_pd(array2 + i + 16), b)));
      _mm256_storeu_pd(
          dest + i + 20,
          _mm256_fmadd_pd(_mm256_loadu_pd(array1 + i + 20), a,
                          _mm256_mul_pd(_mm256_loadu_pd(array2 + i + 20), b)));
      _mm256_storeu_pd(
          dest + i + 24,
          _mm256_fmadd_pd(_mm256_loadu_pd(array1 + i + 24), a,
                          _mm256_mul_pd(_mm256_loadu_pd(array2 + i + 24), b)));
      _mm256_storeu_pd(
          dest + i + 28,
          _mm256_fmadd_pd(_mm256_loadu_pd(array1 + i + 28), a,
                          _mm256_mul_pd(_mm256_loadu_pd(array2 + i + 28), b)));
    }
  }
  for (; i + 3 < length; i += 4) {
    _mm256_storeu_pd(
        dest + i,
        _mm256_fmadd_pd(_mm256_loadu_pd(array1 + i), a,
                        _mm256_mul_pd(_mm256_loadu_pd(array2 + i), b)));
  }
  for (; i < length; ++i) {
    dest[i] = scalar1 * array1[i] + scalar2 * array2[i];
  }
}

// compute soft-thresholding function: sign(x) * max(|x| - thresh, 0)
inline double soft_thresh(double init, double thresh) {
  if (init > thresh)
    init -= thresh;
  else if (init < -thresh)
    init += thresh;
  else
    init = 0.0;
  return init;
}

#endif
graph_io.h
// Copyright 2016, National University of Defense Technology
// Authors: Xuhao Chen <cxh@illinois.edu> and Pingfan Li <lipingfan@163.com>
//
// Graph readers (.gr / .graph / .mtx) that build CSR arrays
// (row_offsets, column_indices, weight) from per-vertex edge lists.
#include <vector>
#include <set>
#include <iostream>
#include <fstream>
#include <sstream>
#include <string.h>
#include <cstdio>    // fix: printf was used without including <cstdio>
#include <cstdlib>   // fix: malloc/exit were used without including <cstdlib>
#include <algorithm>
//#include <iomanip>

typedef float WeightT;

// A directed edge: destination vertex id plus edge weight.
struct Edge {
  int dst;
  WeightT wt;
};

// Orders edges by destination id; comparator for sorting neighbor lists.
bool compare_id(Edge a, Edge b) { return (a.dst < b.dst); }

// Convert adjacency lists `vertices` into CSR form.
//   m:        number of vertices.
//   nnz:      in: edge count from the file header; out: edges kept in CSR.
//   row_offsets / column_indices / weight: malloc'd CSR output arrays.
//   vertices: per-vertex edge lists; now passed BY REFERENCE (previously by
//             value, which deep-copied the entire graph). Mutated in place.
//   symmetrize: edges were inserted in both directions, so skip the
//             edge-count consistency check against the header.
//   sorted:   sort each neighbor list by destination id.
//   remove_selfloops / remove_redundents: drop i->i edges and consecutive
//             duplicate destinations (full dedup requires sorted == true).
void fill_data(int m, int &nnz, int *&row_offsets, int *&column_indices,
               WeightT *&weight, std::vector<std::vector<Edge> > &vertices,
               bool symmetrize, bool sorted, bool remove_selfloops,
               bool remove_redundents) {
  // Sort the neighbor lists.
  if (sorted) {
    printf("Sorting the neighbor lists...");
    for (int i = 0; i < m; i++)
      std::sort(vertices[i].begin(), vertices[i].end(), compare_id);
    printf(" Done\n");
  }
  // Remove self loops with a linear in-place compaction (the original
  // erased one element at a time, which is quadratic per vertex).
  int num_selfloops = 0;
  if (remove_selfloops) {
    printf("Removing self loops...");
    for (int i = 0; i < m; i++) {
      std::vector<Edge> &nbrs = vertices[i];
      unsigned keep = 0;
      for (unsigned j = 0; j < nbrs.size(); j++) {
        if (nbrs[j].dst != i)
          nbrs[keep++] = nbrs[j];
        else
          num_selfloops++;
      }
      nbrs.resize(keep);
    }
    printf(" %d selfloops are removed\n", num_selfloops);
  }
  // Drop consecutive duplicate destinations (keeps the first occurrence),
  // also via linear compaction.
  int num_redundents = 0;
  if (remove_redundents) {
    printf("Removing redundent edges...");
    for (int i = 0; i < m; i++) {
      std::vector<Edge> &nbrs = vertices[i];
      unsigned keep = 0;
      for (unsigned j = 0; j < nbrs.size(); j++) {
        if (keep == 0 || nbrs[j].dst != nbrs[keep - 1].dst)
          nbrs[keep++] = nbrs[j];
        else
          num_redundents++;
      }
      nbrs.resize(keep);
    }
    printf(" %d redundent edges are removed\n", num_redundents);
  }
  // Row offsets are the prefix sums of the (cleaned) degrees.
  row_offsets = (int *)malloc((m + 1) * sizeof(int));
  int count = 0;
  for (int i = 0; i < m; i++) {
    row_offsets[i] = count;
    count += (int)vertices[i].size();
  }
  row_offsets[m] = count;
  if (symmetrize) {
    nnz = count;
  } else {
    // Removed edges must exactly account for the difference vs the header.
    if (count + num_selfloops + num_redundents != nnz)
      printf("Error reading graph, number of edges in edge list %d != %d\n",
             count, nnz);
    nnz = count;
  }
  printf("num_vertices %d num_edges %d\n", m, nnz);
  // Flatten the neighbor lists into the CSR column/weight arrays.
  column_indices = (int *)malloc(count * sizeof(int));
  weight = (WeightT *)malloc(count * sizeof(WeightT));
  int index = 0;
  for (int i = 0; i < m; i++) {
    for (unsigned j = 0; j < vertices[i].size(); j++) {
      column_indices[index] = vertices[i][j].dst;
      weight[index] = vertices[i][j].wt;
      index++;
    }
  }
}

// Parse an R-MAT/DIMACS-style ".gr" file into CSR.
// Format: 'c' comment lines, one "p sp <m> <nnz>" problem line, then one
// "a <src> <dst>" arc line per edge (1-based ids).  All weights become 1.
void gr2csr(char *gr, int &m, int &nnz, int *&row_offsets, int *&column_indices,
            WeightT *&weight, bool symmetrize, bool transpose, bool sorted,
            bool remove_selfloops, bool remove_redundents) {
  printf("Reading RMAT (.gr) input file %s\n", gr);
  std::ifstream cfile;
  cfile.open(gr);
  std::string str;
  getline(cfile, str);
  char c;
  sscanf(str.c_str(), "%c", &c);
  while (c == 'c') {  // skip leading comment lines
    getline(cfile, str);
    sscanf(str.c_str(), "%c", &c);
  }
  char sp[3];
  sscanf(str.c_str(), "%c %s %d %d", &c, sp, &m, &nnz);  // problem line
  std::vector<std::vector<Edge> > vertices(m);
  int src, dst;
  for (int i = 0; i < nnz; i++) {
    getline(cfile, str);
    sscanf(str.c_str(), "%c %d %d", &c, &src, &dst);
    if (c != 'a')
      printf("line %d\n", __LINE__);
    src--;  // file ids are 1-based
    dst--;
    Edge e1, e2;
    if (symmetrize) {
      e2.dst = src;
      e2.wt = 1;
      vertices[dst].push_back(e2);
      transpose = false;  // symmetrizing makes transposition meaningless
    }
    if (!transpose) {
      e1.dst = dst;
      e1.wt = 1;
      vertices[src].push_back(e1);
    } else {
      e1.dst = src;
      e1.wt = 1;
      vertices[dst].push_back(e1);
    }
  }
  cfile.close();  // fix: the stream was never closed here (the other readers close theirs)
  fill_data(m, nnz, row_offsets, column_indices, weight, vertices, symmetrize,
            sorted, remove_selfloops, remove_redundents);
}

// Parse a METIS-style ".graph" file into CSR.
// First line is "<m> <nnz>"; line k then lists the (1-based) neighbors of
// vertex k-1.  All weights become 1.
void graph2csr(char *graph, int &m, int &nnz, int *&row_offsets,
               int *&column_indices, WeightT *&weight, bool symmetrize,
               bool transpose, bool sorted, bool remove_selfloops,
               bool remove_redundents) {
  printf("Reading .graph input file %s\n", graph);
  std::ifstream cfile;
  cfile.open(graph);
  std::string str;
  getline(cfile, str);
  sscanf(str.c_str(), "%d %d", &m, &nnz);
  std::vector<std::vector<Edge> > vertices(m);
  int dst;
  for (int src = 0; src < m; src++) {
    getline(cfile, str);
    std::istringstream istr(str);
    while (istr >> dst) {
      dst--;  // file ids are 1-based
      Edge e1, e2;
      if (symmetrize) {
        e2.dst = src;
        e2.wt = 1;
        vertices[dst].push_back(e2);
        transpose = false;  // symmetrizing makes transposition meaningless
      }
      if (!transpose) {
        e1.dst = dst;
        e1.wt = 1;
        vertices[src].push_back(e1);
      } else {
        e1.dst = src;
        e1.wt = 1;
        vertices[dst].push_back(e1);
      }
    }
  }
  cfile.close();
  fill_data(m, nnz, row_offsets, column_indices, weight, vertices, symmetrize,
            sorted, remove_selfloops, remove_redundents);
}

// Parse a MatrixMarket ".mtx" coordinate file into CSR.
// '%' header/comment lines, then "<m> <n> <nnz>", then one
// "<src> <dst> [weight]" entry per line (1-based ids); a missing weight
// defaults to 1 and negative weights are made non-negative.
void mtx2csr(char *mtx, int &m, int &n, int &nnz, int *&row_offsets,
             int *&column_indices, WeightT *&weight, bool symmetrize,
             bool transpose, bool sorted, bool remove_selfloops,
             bool remove_redundents) {
  printf("Reading (.mtx) input file %s\n", mtx);
  std::ifstream cfile;
  cfile.open(mtx);
  std::string str;
  getline(cfile, str);
  char c;
  sscanf(str.c_str(), "%c", &c);
  while (c == '%') {  // skip comment/banner lines
    getline(cfile, str);
    sscanf(str.c_str(), "%c", &c);
  }
  sscanf(str.c_str(), "%d %d %d", &m, &n, &nnz);
  if (m != n) {
    printf("Warning, m(%d) != n(%d)\n", m, n);
  }
  std::vector<std::vector<Edge> > vertices(m);
  int dst, src;
  WeightT wt = 1.0f;
  for (int i = 0; i < nnz; i++) {
    getline(cfile, str);
    int num = sscanf(str.c_str(), "%d %d %f", &src, &dst, &wt);
    if (num == 2)
      wt = 1;      // unweighted entry: default weight
    if (wt < 0)
      wt = -wt;    // non-negtive weight
    src--;  // file ids are 1-based
    dst--;
    Edge e1, e2;
    if (symmetrize && src != dst) {
      e2.dst = src;
      e2.wt = wt;
      vertices[dst].push_back(e2);
      transpose = false;  // symmetrizing makes transposition meaningless
    }
    if (!transpose) {
      e1.dst = dst;
      e1.wt = wt;
      vertices[src].push_back(e1);
    } else {
      e1.dst = src;
      e1.wt = wt;
      vertices[dst].push_back(e1);
    }
  }
  cfile.close();
  fill_data(m, nnz, row_offsets, column_indices, weight, vertices, symmetrize,
            sorted, remove_selfloops, remove_redundents);
}

// Dispatch on the file extension of argv[1] (.mtx / .graph / .gr), build the
// CSR arrays, and compute per-vertex degrees from the row offsets.
// Exits the process if the extension is not recognized.
void read_graph(int argc, char *argv[], int &m, int &n, int &nnz,
                int *&row_offsets, int *&column_indices, int *&degree,
                WeightT *&weight, bool is_symmetrize = false,
                bool is_transpose = false, bool sorted = true,
                bool remove_selfloops = true, bool remove_redundents = true) {
  //if(is_symmetrize) printf("Requiring symmetric graphs for this algorithm\n");
  if (strstr(argv[1], ".mtx"))
    mtx2csr(argv[1], m, n, nnz, row_offsets, column_indices, weight,
            is_symmetrize, is_transpose, sorted, remove_selfloops,
            remove_redundents);
  else if (strstr(argv[1], ".graph"))
    graph2csr(argv[1], m, nnz, row_offsets, column_indices, weight,
              is_symmetrize, is_transpose, sorted, remove_selfloops,
              remove_redundents);
  else if (strstr(argv[1], ".gr"))
    gr2csr(argv[1], m, nnz, row_offsets, column_indices, weight, is_symmetrize,
           is_transpose, sorted, remove_selfloops, remove_redundents);
  else {
    printf("Unrecognizable input file format\n");
    exit(0);
  }
  printf("Calculating degree...");
  degree = (int *)malloc(m * sizeof(int));
  for (int i = 0; i < m; i++) {
    degree[i] = row_offsets[i + 1] - row_offsets[i];
  }
  printf(" Done\n");
}
local_response_norm.h
// Copyright 2018 Xiaomi, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef MACE_KERNELS_LOCAL_RESPONSE_NORM_H_ #define MACE_KERNELS_LOCAL_RESPONSE_NORM_H_ #include <algorithm> #include <memory> #include <vector> #include "mace/core/future.h" #include "mace/core/tensor.h" #include "mace/public/mace.h" #ifdef MACE_ENABLE_OPENCL #include "mace/core/runtime/opencl/cl2_header.h" #endif // MACE_ENABLE_OPENCL namespace mace { namespace kernels { template<DeviceType D, typename T> struct LocalResponseNormFunctor; template<> struct LocalResponseNormFunctor<DeviceType::CPU, float> { MaceStatus operator()(const Tensor *input, int depth_radius, float bias, float alpha, float beta, Tensor *output, StatsFuture *future) { MACE_UNUSED(future); const index_t batch = input->dim(0); const index_t channels = input->dim(1); const index_t height = input->dim(2); const index_t width = input->dim(3); const float *input_ptr = input->data<float>(); float *output_ptr = output->mutable_data<float>(); index_t image_size = height * width; index_t batch_size = channels * image_size; #pragma omp parallel for collapse(2) for (index_t b = 0; b < batch; ++b) { for (index_t c = 0; c < channels; ++c) { const int begin_input_c = std::max(static_cast<index_t>(0), c - depth_radius); const int end_input_c = std::min(channels, c + depth_radius + 1); index_t pos = b * batch_size; for (index_t hw = 0; hw < height * width; ++hw, ++pos) { float accum = 0.f; for (int input_c = 
begin_input_c; input_c < end_input_c; ++input_c) { const float input_val = input_ptr[pos + input_c * image_size]; accum += input_val * input_val; } const float multiplier = std::pow(bias + alpha * accum, -beta); output_ptr[pos + c * image_size] = input_ptr[pos + c * image_size] * multiplier; } } } return MACE_SUCCESS; } }; } // namespace kernels } // namespace mace #endif // MACE_KERNELS_LOCAL_RESPONSE_NORM_H_
DRB083-declared-in-func-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* A variable is declared inside a function called within a parallel region. The variable should be private if it does not use static storage. */ void foo() { int q=0; q += 1; } int main() { #pragma omp parallel { foo(); } return 0; }
target.c
/* Copyright (C) 2013-2017 Free Software Foundation, Inc.
   Contributed by Jakub Jelinek <jakub@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file contains the support of offloading.  */

#include "config.h"
#include "libgomp.h"
#include "oacc-plugin.h"
#include "oacc-int.h"
#include "gomp-constants.h"
#include <limits.h>
#include <stdbool.h>
#include <stdlib.h>
#ifdef HAVE_INTTYPES_H
# include <inttypes.h>  /* For PRIu64.  */
#endif
#include <string.h>
#include <assert.h>
#include <errno.h>

#ifdef PLUGIN_SUPPORT
#include <dlfcn.h>
#include "plugin-suffix.h"
#endif

static void gomp_target_init (void);

/* The whole initialization code for offloading plugins is only run once.  */
static pthread_once_t gomp_is_initialized = PTHREAD_ONCE_INIT;

/* Mutex for offload image registration.  */
static gomp_mutex_t register_lock;

/* This structure describes an offload image.  It contains type of the
   target device, pointer to host table descriptor, and pointer to target
   data.  */
struct offload_image_descr {
  unsigned version;
  enum offload_target_type type;
  const void *host_table;
  const void *target_data;
};

/* Array of descriptors of offload images.  */
static struct offload_image_descr *offload_images;

/* Total number of offload images.  */
static int num_offload_images;

/* Array of descriptors for all available devices.  */
static struct gomp_device_descr *devices;

/* Total number of available devices.  */
static int num_devices;

/* Number of GOMP_OFFLOAD_CAP_OPENMP_400 devices.  */
static int num_devices_openmp;

/* Similar to gomp_realloc, but release register_lock before gomp_fatal.  */

static void *
gomp_realloc_unlock (void *old, size_t size)
{
  void *ret = realloc (old, size);
  if (ret == NULL)
    {
      gomp_mutex_unlock (&register_lock);
      gomp_fatal ("Out of memory allocating %lu bytes", (unsigned long) size);
    }
  return ret;
}

/* Run the offloading-plugin initialization exactly once (serialized via
   pthread_once on gomp_is_initialized).  */

attribute_hidden void
gomp_init_targets_once (void)
{
  (void) pthread_once (&gomp_is_initialized, gomp_target_init);
}

/* Return the number of GOMP_OFFLOAD_CAP_OPENMP_400 devices, initializing
   the plugins on first use.  */

attribute_hidden int
gomp_get_num_devices (void)
{
  gomp_init_targets_once ();
  return num_devices_openmp;
}

/* Map DEVICE_ID (or the default-device ICV when GOMP_DEVICE_ICV is given)
   to an initialized device descriptor.  Returns NULL when the id is out of
   range or when the device has already been finalized.  */

static struct gomp_device_descr *
resolve_device (int device_id)
{
  if (device_id == GOMP_DEVICE_ICV)
    {
      struct gomp_task_icv *icv = gomp_icv (false);
      device_id = icv->default_device_var;
    }

  if (device_id < 0 || device_id >= gomp_get_num_devices ())
    return NULL;

  gomp_mutex_lock (&devices[device_id].lock);
  if (devices[device_id].state == GOMP_DEVICE_UNINITIALIZED)
    gomp_init_device (&devices[device_id]);
  else if (devices[device_id].state == GOMP_DEVICE_FINALIZED)
    {
      gomp_mutex_unlock (&devices[device_id].lock);
      return NULL;
    }
  gomp_mutex_unlock (&devices[device_id].lock);

  return &devices[device_id];
}

/* Look KEY up in MEM_MAP.  For a zero-length key (host_start == host_end)
   retry the lookup with the range widened one byte to the right, then one
   byte to the left, so zero-length array sections adjacent to an existing
   mapping are still found.  */

static inline splay_tree_key
gomp_map_lookup (splay_tree mem_map, splay_tree_key key)
{
  if (key->host_start != key->host_end)
    return splay_tree_lookup (mem_map, key);

  key->host_end++;
  splay_tree_key n = splay_tree_lookup (mem_map, key);
  key->host_end--;
  if (n)
    return n;
  key->host_start--;
  n = splay_tree_lookup (mem_map, key);
  key->host_start++;
  if (n)
    return n;
  return splay_tree_lookup (mem_map, key);
}

/* Like gomp_map_lookup, but for a zero-length key only retry with the
   range widened to the right.  */

static inline splay_tree_key
gomp_map_0len_lookup (splay_tree mem_map, splay_tree_key key)
{
  if (key->host_start != key->host_end)
    return splay_tree_lookup (mem_map, key);

  key->host_end++;
  splay_tree_key n = splay_tree_lookup (mem_map, key);
  key->host_end--;
  return n;
}

/* Copy SIZE bytes from SRCADDR to DSTADDR through COPY_FUNC; on failure,
   release the device lock and abort with a diagnostic.  DST and SRC only
   name the two sides for the error message.  */

static inline void
gomp_device_copy (struct gomp_device_descr *devicep,
		  bool (*copy_func) (int, void *, const void *, size_t),
		  const char *dst, void *dstaddr,
		  const char *src, const void *srcaddr,
		  size_t size)
{
  if (!copy_func (devicep->target_id, dstaddr, srcaddr, size))
    {
      gomp_mutex_unlock (&devicep->lock);
      gomp_fatal ("Copying of %s object [%p..%p) to %s object [%p..%p) failed",
		  src, srcaddr, srcaddr + size, dst, dstaddr, dstaddr + size);
    }
}

/* Copy SZ bytes from host address H to device address D.  */

static void
gomp_copy_host2dev (struct gomp_device_descr *devicep,
		    void *d, const void *h, size_t sz)
{
  gomp_device_copy (devicep, devicep->host2dev_func, "dev", d, "host", h, sz);
}

/* Copy SZ bytes from device address D to host address H.  */

static void
gomp_copy_dev2host (struct gomp_device_descr *devicep,
		    void *h, const void *d, size_t sz)
{
  gomp_device_copy (devicep, devicep->dev2host_func, "host", h, "dev", d, sz);
}

/* Free DEVPTR on the device; on failure, release the device lock and
   abort.  */

static void
gomp_free_device_memory (struct gomp_device_descr *devicep, void *devptr)
{
  if (!devicep->free_func (devicep->target_id, devptr))
    {
      gomp_mutex_unlock (&devicep->lock);
      gomp_fatal ("error in freeing device memory block at %p", devptr);
    }
}

/* Handle the case where gomp_map_lookup, splay_tree_lookup or
   gomp_map_0len_lookup found oldn for newn.
   Helper function of gomp_map_vars.
*/ static inline void gomp_map_vars_existing (struct gomp_device_descr *devicep, splay_tree_key oldn, splay_tree_key newn, struct target_var_desc *tgt_var, unsigned char kind) { tgt_var->key = oldn; tgt_var->copy_from = GOMP_MAP_COPY_FROM_P (kind); tgt_var->always_copy_from = GOMP_MAP_ALWAYS_FROM_P (kind); tgt_var->offset = newn->host_start - oldn->host_start; tgt_var->length = newn->host_end - newn->host_start; if ((kind & GOMP_MAP_FLAG_FORCE) || oldn->host_start > newn->host_start || oldn->host_end < newn->host_end) { gomp_mutex_unlock (&devicep->lock); gomp_fatal ("Trying to map into device [%p..%p) object when " "[%p..%p) is already mapped", (void *) newn->host_start, (void *) newn->host_end, (void *) oldn->host_start, (void *) oldn->host_end); } if (GOMP_MAP_ALWAYS_TO_P (kind)) gomp_copy_host2dev (devicep, (void *) (oldn->tgt->tgt_start + oldn->tgt_offset + newn->host_start - oldn->host_start), (void *) newn->host_start, newn->host_end - newn->host_start); if (oldn->refcount != REFCOUNT_INFINITY) oldn->refcount++; } static int get_kind (bool short_mapkind, void *kinds, int idx) { return short_mapkind ? ((unsigned short *) kinds)[idx] : ((unsigned char *) kinds)[idx]; } static void gomp_map_pointer (struct target_mem_desc *tgt, uintptr_t host_ptr, uintptr_t target_offset, uintptr_t bias) { struct gomp_device_descr *devicep = tgt->device_descr; struct splay_tree_s *mem_map = &devicep->mem_map; struct splay_tree_key_s cur_node; cur_node.host_start = host_ptr; if (cur_node.host_start == (uintptr_t) NULL) { cur_node.tgt_offset = (uintptr_t) NULL; /* FIXME: see comment about coalescing host/dev transfers below. */ gomp_copy_host2dev (devicep, (void *) (tgt->tgt_start + target_offset), (void *) &cur_node.tgt_offset, sizeof (void *)); return; } /* Add bias to the pointer value. 
*/ cur_node.host_start += bias; cur_node.host_end = cur_node.host_start; splay_tree_key n = gomp_map_lookup (mem_map, &cur_node); if (n == NULL) { gomp_mutex_unlock (&devicep->lock); gomp_fatal ("Pointer target of array section wasn't mapped"); } cur_node.host_start -= n->host_start; cur_node.tgt_offset = n->tgt->tgt_start + n->tgt_offset + cur_node.host_start; /* At this point tgt_offset is target address of the array section. Now subtract bias to get what we want to initialize the pointer with. */ cur_node.tgt_offset -= bias; /* FIXME: see comment about coalescing host/dev transfers below. */ gomp_copy_host2dev (devicep, (void *) (tgt->tgt_start + target_offset), (void *) &cur_node.tgt_offset, sizeof (void *)); } static void gomp_map_fields_existing (struct target_mem_desc *tgt, splay_tree_key n, size_t first, size_t i, void **hostaddrs, size_t *sizes, void *kinds) { struct gomp_device_descr *devicep = tgt->device_descr; struct splay_tree_s *mem_map = &devicep->mem_map; struct splay_tree_key_s cur_node; int kind; const bool short_mapkind = true; const int typemask = short_mapkind ? 
0xff : 0x7; cur_node.host_start = (uintptr_t) hostaddrs[i]; cur_node.host_end = cur_node.host_start + sizes[i]; splay_tree_key n2 = splay_tree_lookup (mem_map, &cur_node); kind = get_kind (short_mapkind, kinds, i); if (n2 && n2->tgt == n->tgt && n2->host_start - n->host_start == n2->tgt_offset - n->tgt_offset) { gomp_map_vars_existing (devicep, n2, &cur_node, &tgt->list[i], kind & typemask); return; } if (sizes[i] == 0) { if (cur_node.host_start > (uintptr_t) hostaddrs[first - 1]) { cur_node.host_start--; n2 = splay_tree_lookup (mem_map, &cur_node); cur_node.host_start++; if (n2 && n2->tgt == n->tgt && n2->host_start - n->host_start == n2->tgt_offset - n->tgt_offset) { gomp_map_vars_existing (devicep, n2, &cur_node, &tgt->list[i], kind & typemask); return; } } cur_node.host_end++; n2 = splay_tree_lookup (mem_map, &cur_node); cur_node.host_end--; if (n2 && n2->tgt == n->tgt && n2->host_start - n->host_start == n2->tgt_offset - n->tgt_offset) { gomp_map_vars_existing (devicep, n2, &cur_node, &tgt->list[i], kind & typemask); return; } } gomp_mutex_unlock (&devicep->lock); gomp_fatal ("Trying to map into device [%p..%p) structure element when " "other mapped elements from the same structure weren't mapped " "together with it", (void *) cur_node.host_start, (void *) cur_node.host_end); } static inline uintptr_t gomp_map_val (struct target_mem_desc *tgt, void **hostaddrs, size_t i) { if (tgt->list[i].key != NULL) return tgt->list[i].key->tgt->tgt_start + tgt->list[i].key->tgt_offset + tgt->list[i].offset; if (tgt->list[i].offset == ~(uintptr_t) 0) return (uintptr_t) hostaddrs[i]; if (tgt->list[i].offset == ~(uintptr_t) 1) return 0; if (tgt->list[i].offset == ~(uintptr_t) 2) return tgt->list[i + 1].key->tgt->tgt_start + tgt->list[i + 1].key->tgt_offset + tgt->list[i + 1].offset + (uintptr_t) hostaddrs[i] - (uintptr_t) hostaddrs[i + 1]; return tgt->tgt_start + tgt->list[i].offset; } attribute_hidden struct target_mem_desc * gomp_map_vars (struct gomp_device_descr *devicep, 
size_t mapnum, void **hostaddrs, void **devaddrs, size_t *sizes, void *kinds, bool short_mapkind, enum gomp_map_vars_kind pragma_kind) { size_t i, tgt_align, tgt_size, not_found_cnt = 0; bool has_firstprivate = false; const int rshift = short_mapkind ? 8 : 3; const int typemask = short_mapkind ? 0xff : 0x7; struct splay_tree_s *mem_map = &devicep->mem_map; struct splay_tree_key_s cur_node; struct target_mem_desc *tgt = gomp_malloc (sizeof (*tgt) + sizeof (tgt->list[0]) * mapnum); tgt->list_count = mapnum; tgt->refcount = pragma_kind == GOMP_MAP_VARS_ENTER_DATA ? 0 : 1; tgt->device_descr = devicep; if (mapnum == 0) { tgt->tgt_start = 0; tgt->tgt_end = 0; return tgt; } tgt_align = sizeof (void *); tgt_size = 0; if (pragma_kind == GOMP_MAP_VARS_TARGET) { size_t align = 4 * sizeof (void *); tgt_align = align; tgt_size = mapnum * sizeof (void *); } gomp_mutex_lock (&devicep->lock); if (devicep->state == GOMP_DEVICE_FINALIZED) { gomp_mutex_unlock (&devicep->lock); free (tgt); return NULL; } for (i = 0; i < mapnum; i++) { int kind = get_kind (short_mapkind, kinds, i); if (hostaddrs[i] == NULL || (kind & typemask) == GOMP_MAP_FIRSTPRIVATE_INT) { tgt->list[i].key = NULL; tgt->list[i].offset = ~(uintptr_t) 0; continue; } else if ((kind & typemask) == GOMP_MAP_USE_DEVICE_PTR) { cur_node.host_start = (uintptr_t) hostaddrs[i]; cur_node.host_end = cur_node.host_start; splay_tree_key n = gomp_map_lookup (mem_map, &cur_node); if (n == NULL) { gomp_mutex_unlock (&devicep->lock); gomp_fatal ("use_device_ptr pointer wasn't mapped"); } cur_node.host_start -= n->host_start; hostaddrs[i] = (void *) (n->tgt->tgt_start + n->tgt_offset + cur_node.host_start); tgt->list[i].key = NULL; tgt->list[i].offset = ~(uintptr_t) 0; continue; } else if ((kind & typemask) == GOMP_MAP_STRUCT) { size_t first = i + 1; size_t last = i + sizes[i]; cur_node.host_start = (uintptr_t) hostaddrs[i]; cur_node.host_end = (uintptr_t) hostaddrs[last] + sizes[last]; tgt->list[i].key = NULL; tgt->list[i].offset = 
~(uintptr_t) 2; splay_tree_key n = splay_tree_lookup (mem_map, &cur_node); if (n == NULL) { size_t align = (size_t) 1 << (kind >> rshift); if (tgt_align < align) tgt_align = align; tgt_size -= (uintptr_t) hostaddrs[first] - (uintptr_t) hostaddrs[i]; tgt_size = (tgt_size + align - 1) & ~(align - 1); tgt_size += cur_node.host_end - (uintptr_t) hostaddrs[i]; not_found_cnt += last - i; for (i = first; i <= last; i++) tgt->list[i].key = NULL; i--; continue; } for (i = first; i <= last; i++) gomp_map_fields_existing (tgt, n, first, i, hostaddrs, sizes, kinds); i--; continue; } else if ((kind & typemask) == GOMP_MAP_ALWAYS_POINTER) { tgt->list[i].key = NULL; tgt->list[i].offset = ~(uintptr_t) 1; has_firstprivate = true; continue; } cur_node.host_start = (uintptr_t) hostaddrs[i]; if (!GOMP_MAP_POINTER_P (kind & typemask)) cur_node.host_end = cur_node.host_start + sizes[i]; else cur_node.host_end = cur_node.host_start + sizeof (void *); if ((kind & typemask) == GOMP_MAP_FIRSTPRIVATE) { tgt->list[i].key = NULL; size_t align = (size_t) 1 << (kind >> rshift); if (tgt_align < align) tgt_align = align; tgt_size = (tgt_size + align - 1) & ~(align - 1); tgt_size += cur_node.host_end - cur_node.host_start; has_firstprivate = true; continue; } splay_tree_key n; if ((kind & typemask) == GOMP_MAP_ZERO_LEN_ARRAY_SECTION) { n = gomp_map_0len_lookup (mem_map, &cur_node); if (!n) { tgt->list[i].key = NULL; tgt->list[i].offset = ~(uintptr_t) 1; continue; } } else n = splay_tree_lookup (mem_map, &cur_node); if (n && n->refcount != REFCOUNT_LINK) gomp_map_vars_existing (devicep, n, &cur_node, &tgt->list[i], kind & typemask); else { tgt->list[i].key = NULL; size_t align = (size_t) 1 << (kind >> rshift); not_found_cnt++; if (tgt_align < align) tgt_align = align; tgt_size = (tgt_size + align - 1) & ~(align - 1); tgt_size += cur_node.host_end - cur_node.host_start; if ((kind & typemask) == GOMP_MAP_TO_PSET) { size_t j; for (j = i + 1; j < mapnum; j++) if (!GOMP_MAP_POINTER_P (get_kind 
(short_mapkind, kinds, j) & typemask)) break; else if ((uintptr_t) hostaddrs[j] < cur_node.host_start || ((uintptr_t) hostaddrs[j] + sizeof (void *) > cur_node.host_end)) break; else { tgt->list[j].key = NULL; i++; } } } } if (devaddrs) { if (mapnum != 1) { gomp_mutex_unlock (&devicep->lock); gomp_fatal ("unexpected aggregation"); } tgt->to_free = devaddrs[0]; tgt->tgt_start = (uintptr_t) tgt->to_free; tgt->tgt_end = tgt->tgt_start + sizes[0]; } else if (not_found_cnt || pragma_kind == GOMP_MAP_VARS_TARGET) { /* Allocate tgt_align aligned tgt_size block of memory. */ /* FIXME: Perhaps change interface to allocate properly aligned memory. */ tgt->to_free = devicep->alloc_func (devicep->target_id, tgt_size + tgt_align - 1); if (!tgt->to_free) { gomp_mutex_unlock (&devicep->lock); gomp_fatal ("device memory allocation fail"); } tgt->tgt_start = (uintptr_t) tgt->to_free; tgt->tgt_start = (tgt->tgt_start + tgt_align - 1) & ~(tgt_align - 1); tgt->tgt_end = tgt->tgt_start + tgt_size; } else { tgt->to_free = NULL; tgt->tgt_start = 0; tgt->tgt_end = 0; } tgt_size = 0; if (pragma_kind == GOMP_MAP_VARS_TARGET) tgt_size = mapnum * sizeof (void *); tgt->array = NULL; if (not_found_cnt || has_firstprivate) { if (not_found_cnt) tgt->array = gomp_malloc (not_found_cnt * sizeof (*tgt->array)); splay_tree_node array = tgt->array; size_t j, field_tgt_offset = 0, field_tgt_clear = ~(size_t) 0; uintptr_t field_tgt_base = 0; for (i = 0; i < mapnum; i++) if (tgt->list[i].key == NULL) { int kind = get_kind (short_mapkind, kinds, i); if (hostaddrs[i] == NULL) continue; switch (kind & typemask) { size_t align, len, first, last; splay_tree_key n; case GOMP_MAP_FIRSTPRIVATE: align = (size_t) 1 << (kind >> rshift); tgt_size = (tgt_size + align - 1) & ~(align - 1); tgt->list[i].offset = tgt_size; len = sizes[i]; gomp_copy_host2dev (devicep, (void *) (tgt->tgt_start + tgt_size), (void *) hostaddrs[i], len); tgt_size += len; continue; case GOMP_MAP_FIRSTPRIVATE_INT: case GOMP_MAP_USE_DEVICE_PTR: 
	  /* (Continuation of gomp_map_vars: second pass over the map
	     clauses, creating splay-tree entries for regions not yet on
	     the device and copying initial data over.)  */
	  case GOMP_MAP_ZERO_LEN_ARRAY_SECTION:
	    /* Zero-length sections were fully handled in the first pass.  */
	    continue;
	  case GOMP_MAP_STRUCT:
	    /* A structure map: SIZES[i] is the number of following list
	       entries that describe its fields.  */
	    first = i + 1;
	    last = i + sizes[i];
	    cur_node.host_start = (uintptr_t) hostaddrs[i];
	    cur_node.host_end = (uintptr_t) hostaddrs[last] + sizes[last];
	    if (tgt->list[first].key != NULL)
	      continue;
	    n = splay_tree_lookup (mem_map, &cur_node);
	    if (n == NULL)
	      {
		/* Structure not mapped yet: reserve suitably aligned room
		   in the target block and remember where the fields will
		   land (field_tgt_* are consumed below, until entry
		   FIELD_TGT_CLEAR has been processed).  */
		size_t align = (size_t) 1 << (kind >> rshift);
		tgt_size -= (uintptr_t) hostaddrs[first]
			    - (uintptr_t) hostaddrs[i];
		tgt_size = (tgt_size + align - 1) & ~(align - 1);
		tgt_size += (uintptr_t) hostaddrs[first]
			    - (uintptr_t) hostaddrs[i];
		field_tgt_base = (uintptr_t) hostaddrs[first];
		field_tgt_offset = tgt_size;
		field_tgt_clear = last;
		tgt_size += cur_node.host_end - (uintptr_t) hostaddrs[first];
		continue;
	      }
	    /* Structure (or an enclosing region) already mapped: just
	       record/refcount each field against the existing mapping.  */
	    for (i = first; i <= last; i++)
	      gomp_map_fields_existing (tgt, n, first, i, hostaddrs,
					sizes, kinds);
	    i--;
	    continue;
	  case GOMP_MAP_ALWAYS_POINTER:
	    cur_node.host_start = (uintptr_t) hostaddrs[i];
	    cur_node.host_end = cur_node.host_start + sizeof (void *);
	    n = splay_tree_lookup (mem_map, &cur_node);
	    if (n == NULL
		|| n->host_start > cur_node.host_start
		|| n->host_end < cur_node.host_end)
	      {
		gomp_mutex_unlock (&devicep->lock);
		gomp_fatal ("always pointer not mapped");
	      }
	    if ((get_kind (short_mapkind, kinds, i - 1) & typemask)
		!= GOMP_MAP_ALWAYS_POINTER)
	      cur_node.tgt_offset = gomp_map_val (tgt, hostaddrs, i - 1);
	    if (cur_node.tgt_offset)
	      /* SIZES[i] is the pointer bias to subtract.  */
	      cur_node.tgt_offset -= sizes[i];
	    /* Write the target address of the pointee into the
	       already-mapped pointer object on the device.  */
	    gomp_copy_host2dev (devicep,
				(void *) (n->tgt->tgt_start
					  + n->tgt_offset
					  + cur_node.host_start
					  - n->host_start),
				(void *) &cur_node.tgt_offset,
				sizeof (void *));
	    cur_node.tgt_offset = n->tgt->tgt_start + n->tgt_offset
				  + cur_node.host_start - n->host_start;
	    continue;
	  default:
	    break;
	  }
	splay_tree_key k = &array->key;
	k->host_start = (uintptr_t) hostaddrs[i];
	if (!GOMP_MAP_POINTER_P (kind & typemask))
	  k->host_end = k->host_start + sizes[i];
	else
	  k->host_end = k->host_start + sizeof (void *);
	splay_tree_key n = splay_tree_lookup (mem_map, k);
	if (n && n->refcount != REFCOUNT_LINK)
	  /* Region already mapped: reuse it (checks extents, bumps
	     refcount).  */
	  gomp_map_vars_existing (devicep, n, k, &tgt->list[i],
				  kind & typemask);
	else
	  {
	    k->link_key = NULL;
	    if (n && n->refcount == REFCOUNT_LINK)
	      {
		/* Replace target address of the pointer with target address
		   of mapped object in the splay tree.  */
		splay_tree_remove (mem_map, n);
		k->link_key = n;
	      }
	    size_t align = (size_t) 1 << (kind >> rshift);
	    tgt->list[i].key = k;
	    k->tgt = tgt;
	    if (field_tgt_clear != ~(size_t) 0)
	      {
		/* Field of a GOMP_MAP_STRUCT mapped above: place it at its
		   offset relative to the structure base.  */
		k->tgt_offset = k->host_start - field_tgt_base
				+ field_tgt_offset;
		if (i == field_tgt_clear)
		  field_tgt_clear = ~(size_t) 0;
	      }
	    else
	      {
		tgt_size = (tgt_size + align - 1) & ~(align - 1);
		k->tgt_offset = tgt_size;
		tgt_size += k->host_end - k->host_start;
	      }
	    tgt->list[i].copy_from = GOMP_MAP_COPY_FROM_P (kind & typemask);
	    tgt->list[i].always_copy_from
	      = GOMP_MAP_ALWAYS_FROM_P (kind & typemask);
	    tgt->list[i].offset = 0;
	    tgt->list[i].length = k->host_end - k->host_start;
	    k->refcount = 1;
	    tgt->refcount++;
	    array->left = NULL;
	    array->right = NULL;
	    splay_tree_insert (mem_map, array);
	    switch (kind & typemask)
	      {
	      case GOMP_MAP_ALLOC:
	      case GOMP_MAP_FROM:
	      case GOMP_MAP_FORCE_ALLOC:
	      case GOMP_MAP_FORCE_FROM:
	      case GOMP_MAP_ALWAYS_FROM:
		/* No host-to-device copy needed for these kinds.  */
		break;
	      case GOMP_MAP_TO:
	      case GOMP_MAP_TOFROM:
	      case GOMP_MAP_FORCE_TO:
	      case GOMP_MAP_FORCE_TOFROM:
	      case GOMP_MAP_ALWAYS_TO:
	      case GOMP_MAP_ALWAYS_TOFROM:
		/* FIXME: Perhaps add some smarts, like if copying
		   several adjacent fields from host to target, use some
		   host buffer to avoid sending each var individually.  */
		gomp_copy_host2dev (devicep,
				    (void *) (tgt->tgt_start + k->tgt_offset),
				    (void *) k->host_start,
				    k->host_end - k->host_start);
		break;
	      case GOMP_MAP_POINTER:
		gomp_map_pointer (tgt, (uintptr_t) *(void **) k->host_start,
				  k->tgt_offset, sizes[i]);
		break;
	      case GOMP_MAP_TO_PSET:
		/* FIXME: see above FIXME comment.  */
		gomp_copy_host2dev (devicep,
				    (void *) (tgt->tgt_start + k->tgt_offset),
				    (void *) k->host_start,
				    k->host_end - k->host_start);
		/* Also process any immediately following POINTER entries
		   whose pointer objects live inside this pointer set.  */
		for (j = i + 1; j < mapnum; j++)
		  if (!GOMP_MAP_POINTER_P (get_kind (short_mapkind, kinds, j)
					   & typemask))
		    break;
		  else if ((uintptr_t) hostaddrs[j] < k->host_start
			   || ((uintptr_t) hostaddrs[j] + sizeof (void *)
			       > k->host_end))
		    break;
		  else
		    {
		      tgt->list[j].key = k;
		      tgt->list[j].copy_from = false;
		      tgt->list[j].always_copy_from = false;
		      if (k->refcount != REFCOUNT_INFINITY)
			k->refcount++;
		      gomp_map_pointer (tgt,
					(uintptr_t) *(void **) hostaddrs[j],
					k->tgt_offset
					+ ((uintptr_t) hostaddrs[j]
					   - k->host_start),
					sizes[j]);
		      i++;
		    }
		break;
	      case GOMP_MAP_FORCE_PRESENT:
		{
		  /* We already looked up the memory region above and it
		     was missing.  */
		  size_t size = k->host_end - k->host_start;
		  gomp_mutex_unlock (&devicep->lock);
#ifdef HAVE_INTTYPES_H
		  gomp_fatal ("present clause: !acc_is_present (%p, "
			      "%"PRIu64" (0x%"PRIx64"))",
			      (void *) k->host_start,
			      (uint64_t) size, (uint64_t) size);
#else
		  gomp_fatal ("present clause: !acc_is_present (%p, "
			      "%lu (0x%lx))", (void *) k->host_start,
			      (unsigned long) size, (unsigned long) size);
#endif
		}
		break;
	      case GOMP_MAP_FORCE_DEVICEPTR:
		assert (k->host_end - k->host_start == sizeof (void *));
		gomp_copy_host2dev (devicep,
				    (void *) (tgt->tgt_start + k->tgt_offset),
				    (void *) k->host_start,
				    sizeof (void *));
		break;
	      default:
		gomp_mutex_unlock (&devicep->lock);
		gomp_fatal ("%s: unhandled kind 0x%.2x", __FUNCTION__,
			    kind);
	      }

	    if (k->link_key)
	      {
		/* Set link pointer on target to the device address of the
		   mapped object.  N here is the removed REFCOUNT_LINK
		   placeholder whose tgt_offset is the device address of
		   the link pointer itself.  */
		void *tgt_addr = (void *) (tgt->tgt_start + k->tgt_offset);
		devicep->host2dev_func (devicep->target_id,
					(void *) n->tgt_offset,
					&tgt_addr, sizeof (void *));
	      }
	    array++;
	  }
      }
    }

  if (pragma_kind == GOMP_MAP_VARS_TARGET)
    {
      /* For "omp target", the beginning of the device block holds one
	 device pointer per map entry, passed as the kernel argument.  */
      for (i = 0; i < mapnum; i++)
	{
	  cur_node.tgt_offset = gomp_map_val (tgt, hostaddrs, i);
	  /* FIXME: see above FIXME comment.  */
	  gomp_copy_host2dev (devicep,
			      (void *) (tgt->tgt_start
					+ i * sizeof (void *)),
			      (void *) &cur_node.tgt_offset,
			      sizeof (void *));
	}
    }

  /* If the variable from "omp target enter data" map-list was already mapped,
     tgt is not needed.  Otherwise tgt will be freed by gomp_unmap_vars or
     gomp_exit_data.  */
  if (pragma_kind == GOMP_MAP_VARS_ENTER_DATA && tgt->refcount == 0)
    {
      free (tgt);
      tgt = NULL;
    }

  gomp_mutex_unlock (&devicep->lock);
  return tgt;
}

/* Release the device memory block and the descriptor TGT itself.  The
   caller must already own TGT (device lock held or TGT unreachable).  */

static void
gomp_unmap_tgt (struct target_mem_desc *tgt)
{
  /* Deallocate on target the tgt->tgt_start .. tgt->tgt_end region.  */
  if (tgt->tgt_end)
    gomp_free_device_memory (tgt->device_descr, tgt->to_free);

  free (tgt->array);
  free (tgt);
}

/* Unmap variables described by TGT.  If DO_COPYFROM is true, copy relevant
   variables back from device to host: if it is false, it is assumed that this
   has been done already.  */

attribute_hidden void
gomp_unmap_vars (struct target_mem_desc *tgt, bool do_copyfrom)
{
  struct gomp_device_descr *devicep = tgt->device_descr;

  if (tgt->list_count == 0)
    {
      free (tgt);
      return;
    }

  gomp_mutex_lock (&devicep->lock);
  if (devicep->state == GOMP_DEVICE_FINALIZED)
    {
      /* Device already shut down: nothing to copy back or unmap, just
	 drop the host-side bookkeeping.  */
      gomp_mutex_unlock (&devicep->lock);
      free (tgt->array);
      free (tgt);
      return;
    }

  size_t i;
  for (i = 0; i < tgt->list_count; i++)
    {
      splay_tree_key k = tgt->list[i].key;
      if (k == NULL)
	continue;

      /* Drop one reference; unmap the region when it reaches zero.  */
      bool do_unmap = false;
      if (k->refcount > 1 && k->refcount != REFCOUNT_INFINITY)
	k->refcount--;
      else if (k->refcount == 1)
	{
	  k->refcount--;
	  do_unmap = true;
	}

      if ((do_unmap && do_copyfrom && tgt->list[i].copy_from)
	  || tgt->list[i].always_copy_from)
	gomp_copy_dev2host (devicep,
			    (void *) (k->host_start + tgt->list[i].offset),
			    (void *) (k->tgt->tgt_start + k->tgt_offset
				      + tgt->list[i].offset),
			    tgt->list[i].length);
      if (do_unmap)
	{
	  splay_tree_remove (&devicep->mem_map, k);
	  if (k->link_key)
	    /* Restore the "declare target link" placeholder mapping.  */
	    splay_tree_insert (&devicep->mem_map,
			       (splay_tree_node) k->link_key);
	  if (k->tgt->refcount > 1)
	    k->tgt->refcount--;
	  else
	    gomp_unmap_tgt (k->tgt);
	}
    }

  if (tgt->refcount > 1)
    tgt->refcount--;
  else
    gomp_unmap_tgt (tgt);

  gomp_mutex_unlock (&devicep->lock);
}

/* Implement "omp target update": copy the MAPNUM regions described by
   HOSTADDRS/SIZES/KINDS between host and device.  Unmapped regions are
   silently skipped; partially mapped regions are a fatal error.  */

static void
gomp_update (struct gomp_device_descr *devicep, size_t mapnum,
	     void **hostaddrs, size_t *sizes, void *kinds, bool short_mapkind)
{
  size_t i;
  struct splay_tree_key_s cur_node;
  const int typemask = short_mapkind ? 0xff : 0x7;

  if (!devicep)
    return;

  if (mapnum == 0)
    return;

  gomp_mutex_lock (&devicep->lock);
  if (devicep->state == GOMP_DEVICE_FINALIZED)
    {
      gomp_mutex_unlock (&devicep->lock);
      return;
    }

  for (i = 0; i < mapnum; i++)
    if (sizes[i])
      {
	cur_node.host_start = (uintptr_t) hostaddrs[i];
	cur_node.host_end = cur_node.host_start + sizes[i];
	splay_tree_key n = splay_tree_lookup (&devicep->mem_map, &cur_node);
	if (n)
	  {
	    int kind = get_kind (short_mapkind, kinds, i);
	    if (n->host_start > cur_node.host_start
		|| n->host_end < cur_node.host_end)
	      {
		gomp_mutex_unlock (&devicep->lock);
		gomp_fatal ("Trying to update [%p..%p) object when "
			    "only [%p..%p) is mapped",
			    (void *) cur_node.host_start,
			    (void *) cur_node.host_end,
			    (void *) n->host_start,
			    (void *) n->host_end);
	      }

	    void *hostaddr = (void *) cur_node.host_start;
	    void *devaddr = (void *) (n->tgt->tgt_start + n->tgt_offset
				      + cur_node.host_start - n->host_start);
	    size_t size = cur_node.host_end - cur_node.host_start;

	    if (GOMP_MAP_COPY_TO_P (kind & typemask))
	      gomp_copy_host2dev (devicep, devaddr, hostaddr, size);
	    if (GOMP_MAP_COPY_FROM_P (kind & typemask))
	      gomp_copy_dev2host (devicep, hostaddr, devaddr, size);
	  }
      }
  gomp_mutex_unlock (&devicep->lock);
}

/* Load image pointed by TARGET_DATA to the device, specified by DEVICEP.
   And insert to splay tree the mapping between addresses from HOST_TABLE
   and from loaded target image.  We rely on the host and device compilers
   emitting variables and functions in the same order.
*/

static void
gomp_load_image_to_device (struct gomp_device_descr *devicep, unsigned version,
			   const void *host_table, const void *target_data,
			   bool is_register_lock)
{
  /* HOST_TABLE is four pointers: [func_start, func_end, var_start, var_end).
     Caller holds DEVICEP->lock (and register_lock iff IS_REGISTER_LOCK).  */
  void **host_func_table = ((void ***) host_table)[0];
  void **host_funcs_end = ((void ***) host_table)[1];
  void **host_var_table = ((void ***) host_table)[2];
  void **host_vars_end = ((void ***) host_table)[3];

  /* The func table contains only addresses, the var table contains addresses
     and corresponding sizes.  */
  int num_funcs = host_funcs_end - host_func_table;
  int num_vars = (host_vars_end - host_var_table) / 2;

  /* Load image to device and get target addresses for the image.  */
  struct addr_pair *target_table = NULL;
  int i, num_target_entries;

  num_target_entries
    = devicep->load_image_func (devicep->target_id, version,
				target_data, &target_table);

  if (num_target_entries != num_funcs + num_vars)
    {
      gomp_mutex_unlock (&devicep->lock);
      if (is_register_lock)
	gomp_mutex_unlock (&register_lock);
      gomp_fatal ("Cannot map target functions or variables"
		  " (expected %u, have %u)", num_funcs + num_vars,
		  num_target_entries);
    }

  /* Insert host-target address mapping into splay tree.  This TGT owns no
     device memory block (tgt_start/tgt_end 0), only the key array.  */
  struct target_mem_desc *tgt = gomp_malloc (sizeof (*tgt));
  tgt->array = gomp_malloc ((num_funcs + num_vars) * sizeof (*tgt->array));
  tgt->refcount = REFCOUNT_INFINITY;
  tgt->tgt_start = 0;
  tgt->tgt_end = 0;
  tgt->to_free = NULL;
  tgt->prev = NULL;
  tgt->list_count = 0;
  tgt->device_descr = devicep;
  splay_tree_node array = tgt->array;

  /* Functions: keyed by host address with a dummy length of 1.  */
  for (i = 0; i < num_funcs; i++)
    {
      splay_tree_key k = &array->key;
      k->host_start = (uintptr_t) host_func_table[i];
      k->host_end = k->host_start + 1;
      k->tgt = tgt;
      k->tgt_offset = target_table[i].start;
      k->refcount = REFCOUNT_INFINITY;
      k->link_key = NULL;
      array->left = NULL;
      array->right = NULL;
      splay_tree_insert (&devicep->mem_map, array);
      array++;
    }

  /* Most significant bit of the size in host and target tables marks
     "omp declare target link" variables.  */
  const uintptr_t link_bit = 1ULL << (sizeof (uintptr_t) * __CHAR_BIT__ - 1);
  const uintptr_t size_mask = ~link_bit;

  for (i = 0; i < num_vars; i++)
    {
      struct addr_pair *target_var = &target_table[num_funcs + i];
      uintptr_t target_size = target_var->end - target_var->start;

      if ((uintptr_t) host_var_table[i * 2 + 1] != target_size)
	{
	  gomp_mutex_unlock (&devicep->lock);
	  if (is_register_lock)
	    gomp_mutex_unlock (&register_lock);
	  gomp_fatal ("Cannot map target variables (size mismatch)");
	}

      splay_tree_key k = &array->key;
      k->host_start = (uintptr_t) host_var_table[i * 2];
      k->host_end
	= k->host_start + (size_mask & (uintptr_t) host_var_table[i * 2 + 1]);
      k->tgt = tgt;
      k->tgt_offset = target_var->start;
      /* Link variables get a placeholder refcount until actually mapped.  */
      k->refcount = target_size & link_bit ? REFCOUNT_LINK : REFCOUNT_INFINITY;
      k->link_key = NULL;
      array->left = NULL;
      array->right = NULL;
      splay_tree_insert (&devicep->mem_map, array);
      array++;
    }

  free (target_table);
}

/* Unload the mappings described by target_data from device DEVICE_P.
   The device must be locked.   */

static void
gomp_unload_image_from_device (struct gomp_device_descr *devicep,
			       unsigned version, const void *host_table,
			       const void *target_data)
{
  void **host_func_table = ((void ***) host_table)[0];
  void **host_funcs_end = ((void ***) host_table)[1];
  void **host_var_table = ((void ***) host_table)[2];
  void **host_vars_end = ((void ***) host_table)[3];

  /* The func table contains only addresses, the var table contains addresses
     and corresponding sizes.  */
  int num_funcs = host_funcs_end - host_func_table;
  int num_vars = (host_vars_end - host_var_table) / 2;

  struct splay_tree_key_s k;
  splay_tree_key node = NULL;

  /* Find mapping at start of node array */
  if (num_funcs || num_vars)
    {
      k.host_start = (num_funcs
		      ? (uintptr_t) host_func_table[0]
		      : (uintptr_t) host_var_table[0]);
      k.host_end = k.host_start + 1;
      node = splay_tree_lookup (&devicep->mem_map, &k);
    }

  if (!devicep->unload_image_func (devicep->target_id, version, target_data))
    {
      gomp_mutex_unlock (&devicep->lock);
      gomp_fatal ("image unload fail");
    }

  /* Remove mappings from splay tree.  */
  int i;
  for (i = 0; i < num_funcs; i++)
    {
      k.host_start = (uintptr_t) host_func_table[i];
      k.host_end = k.host_start + 1;
      splay_tree_remove (&devicep->mem_map, &k);
    }

  /* Most significant bit of the size in host and target tables marks
     "omp declare target link" variables.  */
  const uintptr_t link_bit = 1ULL << (sizeof (uintptr_t) * __CHAR_BIT__ - 1);
  const uintptr_t size_mask = ~link_bit;
  bool is_tgt_unmapped = false;

  for (i = 0; i < num_vars; i++)
    {
      k.host_start = (uintptr_t) host_var_table[i * 2];
      k.host_end
	= k.host_start + (size_mask & (uintptr_t) host_var_table[i * 2 + 1]);

      if (!(link_bit & (uintptr_t) host_var_table[i * 2 + 1]))
	splay_tree_remove (&devicep->mem_map, &k);
      else
	{
	  /* NOTE(review): the lookup result is passed straight to
	     splay_tree_remove -- presumably a link variable is always
	     still mapped here; confirm N cannot be NULL.  */
	  splay_tree_key n = splay_tree_lookup (&devicep->mem_map, &k);
	  splay_tree_remove (&devicep->mem_map, n);
	  if (n->link_key)
	    {
	      if (n->tgt->refcount > 1)
		n->tgt->refcount--;
	      else
		{
		  is_tgt_unmapped = true;
		  gomp_unmap_tgt (n->tgt);
		}
	    }
	}
    }

  if (node && !is_tgt_unmapped)
    {
      /* NOTE(review): NODE points at the first element of
	 NODE->tgt->array, so free (node) releases the key array --
	 relies on the key being the node's first member; confirm.  */
      free (node->tgt);
      free (node);
    }
}

/* This function should be called from every offload image while loading.
   It gets the descriptor of the host func and var tables HOST_TABLE, TYPE of
   the target, and TARGET_DATA needed by target plugin.  */

void
GOMP_offload_register_ver (unsigned version, const void *host_table,
			   int target_type, const void *target_data)
{
  int i;

  if (GOMP_VERSION_LIB (version) > GOMP_VERSION)
    gomp_fatal ("Library too old for offload (version %u < %u)",
		GOMP_VERSION, GOMP_VERSION_LIB (version));

  gomp_mutex_lock (&register_lock);

  /* Load image to all initialized devices.  */
  for (i = 0; i < num_devices; i++)
    {
      struct gomp_device_descr *devicep = &devices[i];
      gomp_mutex_lock (&devicep->lock);
      if (devicep->type == target_type
	  && devicep->state == GOMP_DEVICE_INITIALIZED)
	gomp_load_image_to_device (devicep, version,
				   host_table, target_data, true);
      gomp_mutex_unlock (&devicep->lock);
    }

  /* Insert image to array of pending images.  */
  offload_images
    = gomp_realloc_unlock (offload_images,
			   (num_offload_images + 1)
			   * sizeof (struct offload_image_descr));
  offload_images[num_offload_images].version = version;
  offload_images[num_offload_images].type = target_type;
  offload_images[num_offload_images].host_table = host_table;
  offload_images[num_offload_images].target_data = target_data;

  num_offload_images++;
  gomp_mutex_unlock (&register_lock);
}

/* Legacy (version 0) entry point for image registration.  */

void
GOMP_offload_register (const void *host_table, int target_type,
		       const void *target_data)
{
  GOMP_offload_register_ver (0, host_table, target_type, target_data);
}

/* This function should be called from every offload image while unloading.
   It gets the descriptor of the host func and var tables HOST_TABLE, TYPE of
   the target, and TARGET_DATA needed by target plugin.  */

void
GOMP_offload_unregister_ver (unsigned version, const void *host_table,
			     int target_type, const void *target_data)
{
  int i;

  gomp_mutex_lock (&register_lock);

  /* Unload image from all initialized devices.  */
  for (i = 0; i < num_devices; i++)
    {
      struct gomp_device_descr *devicep = &devices[i];
      gomp_mutex_lock (&devicep->lock);
      if (devicep->type == target_type
	  && devicep->state == GOMP_DEVICE_INITIALIZED)
	gomp_unload_image_from_device (devicep, version,
				       host_table, target_data);
      gomp_mutex_unlock (&devicep->lock);
    }

  /* Remove image from array of pending images.  */
  for (i = 0; i < num_offload_images; i++)
    if (offload_images[i].target_data == target_data)
      {
	/* Order does not matter: replace with the last element.  */
	offload_images[i] = offload_images[--num_offload_images];
	break;
      }

  gomp_mutex_unlock (&register_lock);
}

/* Legacy (version 0) entry point for image unregistration.  */

void
GOMP_offload_unregister (const void *host_table, int target_type,
			 const void *target_data)
{
  GOMP_offload_unregister_ver (0, host_table, target_type, target_data);
}

/* This function initializes the target device, specified by DEVICEP.  DEVICEP
   must be locked on entry, and remains locked on return.  */

attribute_hidden void
gomp_init_device (struct gomp_device_descr *devicep)
{
  int i;
  if (!devicep->init_device_func (devicep->target_id))
    {
      gomp_mutex_unlock (&devicep->lock);
      gomp_fatal ("device initialization failed");
    }

  /* Load to device all images registered by the moment.  */
  for (i = 0; i < num_offload_images; i++)
    {
      struct offload_image_descr *image = &offload_images[i];
      if (image->type == devicep->type)
	gomp_load_image_to_device (devicep, image->version,
				   image->host_table, image->target_data,
				   false);
    }

  devicep->state = GOMP_DEVICE_INITIALIZED;
}

/* Unload all images from DEVICEP if it was initialized.  DEVICEP must be
   locked on entry, and remains locked on return.  */

attribute_hidden void
gomp_unload_device (struct gomp_device_descr *devicep)
{
  if (devicep->state == GOMP_DEVICE_INITIALIZED)
    {
      unsigned i;

      /* Unload from device all images registered at the moment.  */
      for (i = 0; i < num_offload_images; i++)
	{
	  struct offload_image_descr *image = &offload_images[i];
	  if (image->type == devicep->type)
	    gomp_unload_image_from_device (devicep, image->version,
					   image->host_table,
					   image->target_data);
	}
    }
}

/* Free address mapping tables.  MM must be locked on entry, and remains locked
   on return.  */

attribute_hidden void
gomp_free_memmap (struct splay_tree_s *mem_map)
{
  while (mem_map->root)
    {
      /* Removing the root's key also removes every other key that shares
	 its TGT; then the descriptor itself can be freed.  */
      struct target_mem_desc *tgt = mem_map->root->key.tgt;

      splay_tree_remove (mem_map, &mem_map->root->key);
      free (tgt->array);
      free (tgt);
    }
}

/* Host fallback for GOMP_target{,_ext} routines.
*/

static void
gomp_target_fallback (void (*fn) (void *), void **hostaddrs)
{
  /* Run FN on the host inside a pristine thread state, then restore the
     previous state afterwards.  */
  struct gomp_thread old_thr, *thr = gomp_thread ();
  old_thr = *thr;
  memset (thr, '\0', sizeof (*thr));
  if (gomp_places_list)
    {
      /* Keep the current place so affinity stays meaningful.  */
      thr->place = old_thr.place;
      thr->ts.place_partition_len = gomp_places_list_len;
    }
  fn (hostaddrs);
  gomp_free_thread (thr);
  *thr = old_thr;
}

/* Calculate alignment and size requirements of a private copy of data shared
   as GOMP_MAP_FIRSTPRIVATE and store them to TGT_ALIGN and TGT_SIZE.  */

static inline void
calculate_firstprivate_requirements (size_t mapnum, size_t *sizes,
				     unsigned short *kinds, size_t *tgt_align,
				     size_t *tgt_size)
{
  size_t i;
  for (i = 0; i < mapnum; i++)
    if ((kinds[i] & 0xff) == GOMP_MAP_FIRSTPRIVATE)
      {
	/* High byte of the short kind encodes log2 alignment.  */
	size_t align = (size_t) 1 << (kinds[i] >> 8);
	if (*tgt_align < align)
	  *tgt_align = align;
	*tgt_size = (*tgt_size + align - 1) & ~(align - 1);
	*tgt_size += sizes[i];
      }
}

/* Copy data shared as GOMP_MAP_FIRSTPRIVATE to DST.  */

static inline void
copy_firstprivate_data (char *tgt, size_t mapnum, void **hostaddrs,
			size_t *sizes, unsigned short *kinds, size_t tgt_align,
			size_t tgt_size)
{
  /* Align the buffer start, then pack each firstprivate copy and redirect
     HOSTADDRS[i] at the copy.  */
  uintptr_t al = (uintptr_t) tgt & (tgt_align - 1);
  if (al)
    tgt += tgt_align - al;
  tgt_size = 0;
  size_t i;
  for (i = 0; i < mapnum; i++)
    if ((kinds[i] & 0xff) == GOMP_MAP_FIRSTPRIVATE)
      {
	size_t align = (size_t) 1 << (kinds[i] >> 8);
	tgt_size = (tgt_size + align - 1) & ~(align - 1);
	memcpy (tgt + tgt_size, hostaddrs[i], sizes[i]);
	hostaddrs[i] = tgt + tgt_size;
	tgt_size = tgt_size + sizes[i];
      }
}

/* Helper function of GOMP_target{,_ext} routines.  Return the device address
   of HOST_FN, or NULL if it cannot be resolved (device finalized or the
   function is not in the image).  */

static void *
gomp_get_target_fn_addr (struct gomp_device_descr *devicep,
			 void (*host_fn) (void *))
{
  if (devicep->capabilities & GOMP_OFFLOAD_CAP_NATIVE_EXEC)
    /* Device executes host code natively; no translation needed.  */
    return (void *) host_fn;
  else
    {
      gomp_mutex_lock (&devicep->lock);
      if (devicep->state == GOMP_DEVICE_FINALIZED)
	{
	  gomp_mutex_unlock (&devicep->lock);
	  return NULL;
	}

      struct splay_tree_key_s k;
      k.host_start = (uintptr_t) host_fn;
      k.host_end = k.host_start + 1;
      splay_tree_key tgt_fn = splay_tree_lookup (&devicep->mem_map, &k);
      gomp_mutex_unlock (&devicep->lock);
      if (tgt_fn == NULL)
	return NULL;

      return (void *) tgt_fn->tgt_offset;
    }
}

/* Called when encountering a target directive.  If DEVICE
   is GOMP_DEVICE_ICV, it means use device-var ICV.  If it is
   GOMP_DEVICE_HOST_FALLBACK (or any value
   larger than last available hw device), use host fallback.
   FN is address of host code, UNUSED is part of the current ABI, but
   we're not actually using it.  HOSTADDRS, SIZES and KINDS are arrays
   with MAPNUM entries, with addresses of the host objects,
   sizes of the host objects (resp. for pointer kind pointer bias
   and assumed sizeof (void *) size) and kinds.  */

void
GOMP_target (int device, void (*fn) (void *), const void *unused,
	     size_t mapnum, void **hostaddrs, size_t *sizes,
	     unsigned char *kinds)
{
  struct gomp_device_descr *devicep = resolve_device (device);

  void *fn_addr;
  if (devicep == NULL
      || !(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
      /* All shared memory devices should use the GOMP_target_ext function.  */
      || devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM
      || !(fn_addr = gomp_get_target_fn_addr (devicep, fn)))
    return gomp_target_fallback (fn, hostaddrs);

  struct target_mem_desc *tgt_vars
    = gomp_map_vars (devicep, mapnum, hostaddrs, NULL, sizes, kinds, false,
		     GOMP_MAP_VARS_TARGET);
  devicep->run_func (devicep->target_id, fn_addr,
		     (void *) tgt_vars->tgt_start, NULL);
  gomp_unmap_vars (tgt_vars, true);
}

/* Like GOMP_target, but KINDS is 16-bit, UNUSED is no longer present,
   and several arguments have been added:
   FLAGS is a bitmask, see GOMP_TARGET_FLAG_* in gomp-constants.h.
   DEPEND is array of dependencies, see GOMP_task for details.

   ARGS is a pointer to an array consisting of a variable number of both
   device-independent and device-specific arguments, which can take one or
   two elements where the first specifies for which device it is intended,
   the type and optionally also the value.  If the value is not present in
   the first one, the whole second element contains the actual value.  The
   last element of the array is a single NULL.  Among the device independent
   can be for example NUM_TEAMS and THREAD_LIMIT.

   NUM_TEAMS is positive if GOMP_teams will be called in the body with
   that value, or 1 if teams construct is not present, or 0, if
   teams construct does not have num_teams clause and so the choice is
   implementation defined, and -1 if it can't be determined on the host
   what value will GOMP_teams have on the device.
   THREAD_LIMIT similarly is positive if GOMP_teams will be called in the
   body with that value, or 0, if teams construct does not have thread_limit
   clause or the teams construct is not present, or -1 if it can't be
   determined on the host what value will GOMP_teams have on the device.
*/

void
GOMP_target_ext (int device, void (*fn) (void *), size_t mapnum,
		 void **hostaddrs, size_t *sizes, unsigned short *kinds,
		 unsigned int flags, void **depend, void **args)
{
  struct gomp_device_descr *devicep = resolve_device (device);
  size_t tgt_align = 0, tgt_size = 0;
  bool fpc_done = false;

  if (flags & GOMP_TARGET_FLAG_NOWAIT)
    {
      struct gomp_thread *thr = gomp_thread ();
      /* Create a team if we don't have any around, as nowait
	 target tasks make sense to run asynchronously even when
	 outside of any parallel.  */
      if (__builtin_expect (thr->ts.team == NULL, 0))
	{
	  struct gomp_team *team = gomp_new_team (1);
	  struct gomp_task *task = thr->task;
	  struct gomp_task_icv *icv = task ? &task->icv : &gomp_global_icv;
	  team->prev_ts = thr->ts;
	  thr->ts.team = team;
	  thr->ts.team_id = 0;
	  thr->ts.work_share = &team->work_shares[0];
	  thr->ts.last_work_share = NULL;
#ifdef HAVE_SYNC_BUILTINS
	  thr->ts.single_count = 0;
#endif
	  thr->ts.static_trip = 0;
	  thr->task = &team->implicit_task[0];
	  gomp_init_task (thr->task, NULL, icv);
	  if (task)
	    {
	      /* Finish the pre-existing task before switching to the
		 team's implicit task.  */
	      thr->task = task;
	      gomp_end_task ();
	      free (task);
	      thr->task = &team->implicit_task[0];
	    }
	  else
	    pthread_setspecific (gomp_thread_destructor, thr);
	}
      if (thr->ts.team
	  && !thr->task->final_task)
	{
	  /* Run asynchronously as a target task.  */
	  gomp_create_target_task (devicep, fn, mapnum, hostaddrs,
				   sizes, kinds, flags, depend, args,
				   GOMP_TARGET_TASK_BEFORE_MAP);
	  return;
	}
    }

  /* If there are depend clauses, but nowait is not present
     (or we are in a final task), block the parent task until the
     dependencies are resolved and then just continue with the rest
     of the function as if it is a merged task.  */
  if (depend != NULL)
    {
      struct gomp_thread *thr = gomp_thread ();
      if (thr->task && thr->task->depend_hash)
	{
	  /* If we might need to wait, copy firstprivate now.  */
	  calculate_firstprivate_requirements (mapnum, sizes, kinds,
					       &tgt_align, &tgt_size);
	  if (tgt_align)
	    {
	      char *tgt = gomp_alloca (tgt_size + tgt_align - 1);
	      copy_firstprivate_data (tgt, mapnum, hostaddrs, sizes, kinds,
				      tgt_align, tgt_size);
	    }
	  fpc_done = true;
	  gomp_task_maybe_wait_for_dependencies (depend);
	}
    }

  void *fn_addr;
  if (devicep == NULL
      || !(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
      || !(fn_addr = gomp_get_target_fn_addr (devicep, fn))
      || (devicep->can_run_func && !devicep->can_run_func (fn_addr)))
    {
      /* Host fallback; firstprivate still needs its private copies.  */
      if (!fpc_done)
	{
	  calculate_firstprivate_requirements (mapnum, sizes, kinds,
					       &tgt_align, &tgt_size);
	  if (tgt_align)
	    {
	      char *tgt = gomp_alloca (tgt_size + tgt_align - 1);
	      copy_firstprivate_data (tgt, mapnum, hostaddrs, sizes, kinds,
				      tgt_align, tgt_size);
	    }
	}
      gomp_target_fallback (fn, hostaddrs);
      return;
    }

  struct target_mem_desc *tgt_vars;
  if (devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    {
      /* Shared-memory device: no mapping, pass host addresses directly.  */
      if (!fpc_done)
	{
	  calculate_firstprivate_requirements (mapnum, sizes, kinds,
					       &tgt_align, &tgt_size);
	  if (tgt_align)
	    {
	      char *tgt = gomp_alloca (tgt_size + tgt_align - 1);
	      copy_firstprivate_data (tgt, mapnum, hostaddrs, sizes, kinds,
				      tgt_align, tgt_size);
	    }
	}
      tgt_vars = NULL;
    }
  else
    tgt_vars = gomp_map_vars (devicep, mapnum, hostaddrs, NULL, sizes, kinds,
			      true, GOMP_MAP_VARS_TARGET);

  devicep->run_func (devicep->target_id, fn_addr,
		     tgt_vars ? (void *) tgt_vars->tgt_start : hostaddrs,
		     args);
  if (tgt_vars)
    gomp_unmap_vars (tgt_vars, true);
}

/* Host fallback for GOMP_target_data{,_ext} routines.  */

static void
gomp_target_data_fallback (void)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  if (icv->target_data)
    {
      /* Even when doing a host fallback, if there are any active
	 #pragma omp target data constructs, need to remember the
	 new #pragma omp target data, otherwise GOMP_target_end_data
	 would get out of sync.  */
      struct target_mem_desc *tgt
	= gomp_map_vars (NULL, 0, NULL, NULL, NULL, NULL, false,
			 GOMP_MAP_VARS_DATA);
      tgt->prev = icv->target_data;
      icv->target_data = tgt;
    }
}

/* Legacy "omp target data" entry point (8-bit KINDS, unused ABI arg).  */

void
GOMP_target_data (int device, const void *unused, size_t mapnum,
		  void **hostaddrs, size_t *sizes, unsigned char *kinds)
{
  struct gomp_device_descr *devicep = resolve_device (device);

  if (devicep == NULL
      || !(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
      || (devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM))
    return gomp_target_data_fallback ();

  struct target_mem_desc *tgt
    = gomp_map_vars (devicep, mapnum, hostaddrs, NULL, sizes, kinds, false,
		     GOMP_MAP_VARS_DATA);
  /* Push onto the per-task stack of active target data regions.  */
  struct gomp_task_icv *icv = gomp_icv (true);
  tgt->prev = icv->target_data;
  icv->target_data = tgt;
}

/* Current "omp target data" entry point (16-bit KINDS).  */

void
GOMP_target_data_ext (int device, size_t mapnum, void **hostaddrs,
		      size_t *sizes, unsigned short *kinds)
{
  struct gomp_device_descr *devicep = resolve_device (device);

  if (devicep == NULL
      || !(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
      || devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return gomp_target_data_fallback ();

  struct target_mem_desc *tgt
    = gomp_map_vars (devicep, mapnum, hostaddrs, NULL, sizes, kinds, true,
		     GOMP_MAP_VARS_DATA);
  struct gomp_task_icv *icv = gomp_icv (true);
  tgt->prev = icv->target_data;
  icv->target_data = tgt;
}

/* End the innermost active "omp target data" region: pop it and unmap
   its variables (copying back as recorded).  */

void
GOMP_target_end_data (void)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  if (icv->target_data)
    {
      struct target_mem_desc *tgt = icv->target_data;
      icv->target_data = tgt->prev;
      gomp_unmap_vars (tgt, true);
    }
}

/* Legacy "omp target update" entry point (8-bit KINDS, unused ABI arg).  */

void
GOMP_target_update (int device, const void *unused, size_t mapnum,
		    void **hostaddrs, size_t *sizes, unsigned char *kinds)
{
  struct gomp_device_descr *devicep = resolve_device (device);

  if (devicep == NULL
      || !(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
      || devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return;

  gomp_update (devicep, mapnum, hostaddrs, sizes, kinds, false);
}

/* Current "omp target update" entry point (16-bit KINDS, flags and
   dependencies).  */

void
GOMP_target_update_ext (int device, size_t mapnum, void
			**hostaddrs, size_t *sizes, unsigned short *kinds,
			unsigned int flags, void **depend)
{
  struct gomp_device_descr *devicep = resolve_device (device);

  /* If there are depend clauses, but nowait is not present,
     block the parent task until the dependencies are resolved
     and then just continue with the rest of the function as if it
     is a merged task.  Until we are able to schedule task during
     variable mapping or unmapping, ignore nowait if depend clauses
     are not present.  */
  if (depend != NULL)
    {
      struct gomp_thread *thr = gomp_thread ();
      if (thr->task && thr->task->depend_hash)
	{
	  if ((flags & GOMP_TARGET_FLAG_NOWAIT)
	      && thr->ts.team
	      && !thr->task->final_task)
	    {
	      /* Defer the update to a target task honoring DEPEND.  */
	      if (gomp_create_target_task (devicep, (void (*) (void *)) NULL,
					   mapnum, hostaddrs, sizes, kinds,
					   flags | GOMP_TARGET_FLAG_UPDATE,
					   depend, NULL,
					   GOMP_TARGET_TASK_DATA))
		return;
	    }
	  else
	    {
	      struct gomp_team *team = thr->ts.team;
	      /* If parallel or taskgroup has been cancelled, don't start new
		 tasks.  */
	      if (team
		  && (gomp_team_barrier_cancelled (&team->barrier)
		      || (thr->task->taskgroup
			  && thr->task->taskgroup->cancelled)))
		return;

	      gomp_task_maybe_wait_for_dependencies (depend);
	    }
	}
    }

  if (devicep == NULL
      || !(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
      || devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return;

  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  /* If parallel or taskgroup has been cancelled, don't start new tasks.  */
  if (team
      && (gomp_team_barrier_cancelled (&team->barrier)
	  || (thr->task->taskgroup && thr->task->taskgroup->cancelled)))
    return;

  gomp_update (devicep, mapnum, hostaddrs, sizes, kinds, true);
}

/* Implement "omp target exit data": drop references to the MAPNUM listed
   regions, copying back and unmapping those whose refcount drops to 0.  */

static void
gomp_exit_data (struct gomp_device_descr *devicep, size_t mapnum,
		void **hostaddrs, size_t *sizes, unsigned short *kinds)
{
  const int typemask = 0xff;
  size_t i;
  gomp_mutex_lock (&devicep->lock);
  if (devicep->state == GOMP_DEVICE_FINALIZED)
    {
      gomp_mutex_unlock (&devicep->lock);
      return;
    }

  for (i = 0; i < mapnum; i++)
    {
      struct splay_tree_key_s cur_node;
      unsigned char kind = kinds[i] & typemask;
      switch (kind)
	{
	case GOMP_MAP_FROM:
	case GOMP_MAP_ALWAYS_FROM:
	case GOMP_MAP_DELETE:
	case GOMP_MAP_RELEASE:
	case GOMP_MAP_ZERO_LEN_ARRAY_SECTION:
	case GOMP_MAP_DELETE_ZERO_LEN_ARRAY_SECTION:
	  cur_node.host_start = (uintptr_t) hostaddrs[i];
	  cur_node.host_end = cur_node.host_start + sizes[i];
	  splay_tree_key k = (kind == GOMP_MAP_DELETE_ZERO_LEN_ARRAY_SECTION
			      || kind == GOMP_MAP_ZERO_LEN_ARRAY_SECTION)
	    ? gomp_map_0len_lookup (&devicep->mem_map, &cur_node)
	    : splay_tree_lookup (&devicep->mem_map, &cur_node);
	  if (!k)
	    /* Not mapped: nothing to do for this entry.  */
	    continue;

	  if (k->refcount > 0 && k->refcount != REFCOUNT_INFINITY)
	    k->refcount--;
	  /* "delete" forces the refcount to zero regardless.  */
	  if ((kind == GOMP_MAP_DELETE
	       || kind == GOMP_MAP_DELETE_ZERO_LEN_ARRAY_SECTION)
	      && k->refcount != REFCOUNT_INFINITY)
	    k->refcount = 0;

	  if ((kind == GOMP_MAP_FROM && k->refcount == 0)
	      || kind == GOMP_MAP_ALWAYS_FROM)
	    gomp_copy_dev2host (devicep, (void *) cur_node.host_start,
				(void *) (k->tgt->tgt_start + k->tgt_offset
					  + cur_node.host_start
					  - k->host_start),
				cur_node.host_end - cur_node.host_start);
	  if (k->refcount == 0)
	    {
	      splay_tree_remove (&devicep->mem_map, k);
	      if (k->link_key)
		/* Restore the "declare target link" placeholder.  */
		splay_tree_insert (&devicep->mem_map,
				   (splay_tree_node) k->link_key);
	      if (k->tgt->refcount > 1)
		k->tgt->refcount--;
	      else
		gomp_unmap_tgt (k->tgt);
	    }

	  break;
	default:
	  gomp_mutex_unlock (&devicep->lock);
	  gomp_fatal ("GOMP_target_enter_exit_data unhandled kind 0x%.2x",
		      kind);
	}
    }

  gomp_mutex_unlock (&devicep->lock);
}

/* Entry point for both "omp target enter data" and "omp target exit data"
   (selected via GOMP_TARGET_FLAG_EXIT_DATA in FLAGS).  */

void
GOMP_target_enter_exit_data (int device, size_t mapnum, void **hostaddrs,
			     size_t *sizes, unsigned short *kinds,
			     unsigned int flags, void **depend)
{
  struct gomp_device_descr *devicep = resolve_device (device);

  /* If there are depend clauses, but nowait is not present,
     block the parent task until the dependencies are resolved
     and then just continue with the rest of the function as if it
     is a merged task.  Until we are able to schedule task during
     variable mapping or unmapping, ignore nowait if depend clauses
     are not present.  */
  if (depend != NULL)
    {
      struct gomp_thread *thr = gomp_thread ();
      if (thr->task && thr->task->depend_hash)
	{
	  if ((flags & GOMP_TARGET_FLAG_NOWAIT)
	      && thr->ts.team
	      && !thr->task->final_task)
	    {
	      if (gomp_create_target_task (devicep, (void (*) (void *)) NULL,
					   mapnum, hostaddrs, sizes, kinds,
					   flags, depend, NULL,
					   GOMP_TARGET_TASK_DATA))
		return;
	    }
	  else
	    {
	      struct gomp_team *team = thr->ts.team;
	      /* If parallel or taskgroup has been cancelled, don't start new
		 tasks.
		 */
	      if (team
		  && (gomp_team_barrier_cancelled (&team->barrier)
		      || (thr->task->taskgroup
			  && thr->task->taskgroup->cancelled)))
		return;

	      gomp_task_maybe_wait_for_dependencies (depend);
	    }
	}
    }

  if (devicep == NULL
      || !(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
      || devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return;

  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  /* If parallel or taskgroup has been cancelled, don't start new tasks.  */
  if (team
      && (gomp_team_barrier_cancelled (&team->barrier)
	  || (thr->task->taskgroup && thr->task->taskgroup->cancelled)))
    return;

  size_t i;
  if ((flags & GOMP_TARGET_FLAG_EXIT_DATA) == 0)
    /* enter data: map each clause; a STRUCT entry consumes SIZES[i]
       following field entries in a single gomp_map_vars call.  */
    for (i = 0; i < mapnum; i++)
      if ((kinds[i] & 0xff) == GOMP_MAP_STRUCT)
	{
	  gomp_map_vars (devicep, sizes[i] + 1, &hostaddrs[i], NULL,
			 &sizes[i], &kinds[i], true,
			 GOMP_MAP_VARS_ENTER_DATA);
	  i += sizes[i];
	}
      else
	gomp_map_vars (devicep, 1, &hostaddrs[i], NULL, &sizes[i], &kinds[i],
		       true, GOMP_MAP_VARS_ENTER_DATA);
  else
    gomp_exit_data (devicep, mapnum, hostaddrs, sizes, kinds);
}

/* Task body executed for a deferred target construct (see
   gomp_create_target_task).  Returns true if the work was queued
   asynchronously on the device and the task must wait for completion,
   false if it finished synchronously.  */

bool
gomp_target_task_fn (void *data)
{
  struct gomp_target_task *ttask = (struct gomp_target_task *) data;
  struct gomp_device_descr *devicep = ttask->devicep;

  if (ttask->fn != NULL)
    {
      /* "omp target" with nowait.  */
      void *fn_addr;
      if (devicep == NULL
	  || !(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
	  || !(fn_addr = gomp_get_target_fn_addr (devicep, ttask->fn))
	  || (devicep->can_run_func && !devicep->can_run_func (fn_addr)))
	{
	  ttask->state = GOMP_TARGET_TASK_FALLBACK;
	  gomp_target_fallback (ttask->fn, ttask->hostaddrs);
	  return false;
	}

      if (ttask->state == GOMP_TARGET_TASK_FINISHED)
	{
	  /* Second invocation, after the async run completed: clean up.  */
	  if (ttask->tgt)
	    gomp_unmap_vars (ttask->tgt, true);
	  return false;
	}

      void *actual_arguments;
      if (devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
	{
	  ttask->tgt = NULL;
	  actual_arguments = ttask->hostaddrs;
	}
      else
	{
	  ttask->tgt = gomp_map_vars (devicep, ttask->mapnum,
				      ttask->hostaddrs, NULL, ttask->sizes,
				      ttask->kinds, true,
				      GOMP_MAP_VARS_TARGET);
	  actual_arguments = (void *) ttask->tgt->tgt_start;
	}
      ttask->state = GOMP_TARGET_TASK_READY_TO_RUN;

      devicep->async_run_func (devicep->target_id, fn_addr, actual_arguments,
			       ttask->args, (void *) ttask);
      return true;
    }
  else if (devicep == NULL
	   || !(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
	   || devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return false;

  /* Deferred "target update" or "target enter/exit data".  */
  size_t i;
  if (ttask->flags & GOMP_TARGET_FLAG_UPDATE)
    gomp_update (devicep, ttask->mapnum, ttask->hostaddrs, ttask->sizes,
		 ttask->kinds, true);
  else if ((ttask->flags & GOMP_TARGET_FLAG_EXIT_DATA) == 0)
    for (i = 0; i < ttask->mapnum; i++)
      if ((ttask->kinds[i] & 0xff) == GOMP_MAP_STRUCT)
	{
	  gomp_map_vars (devicep, ttask->sizes[i] + 1, &ttask->hostaddrs[i],
			 NULL, &ttask->sizes[i], &ttask->kinds[i], true,
			 GOMP_MAP_VARS_ENTER_DATA);
	  i += ttask->sizes[i];
	}
      else
	gomp_map_vars (devicep, 1, &ttask->hostaddrs[i], NULL,
		       &ttask->sizes[i], &ttask->kinds[i], true,
		       GOMP_MAP_VARS_ENTER_DATA);
  else
    gomp_exit_data (devicep, ttask->mapnum, ttask->hostaddrs, ttask->sizes,
		    ttask->kinds);
  return false;
}

/* Record the teams construct's thread_limit in the ICV; NUM_TEAMS is
   accepted for ABI compatibility but unused on the host.  */

void
GOMP_teams (unsigned int num_teams, unsigned int thread_limit)
{
  if (thread_limit)
    {
      struct gomp_task_icv *icv = gomp_icv (true);
      icv->thread_limit_var
	= thread_limit > INT_MAX ? UINT_MAX : thread_limit;
    }
  (void) num_teams;
}

/* OpenMP 4.5 omp_target_alloc: allocate SIZE bytes on DEVICE_NUM, or on
   the host for the fallback device / shared-memory devices.  Returns NULL
   on failure or invalid device.  */

void *
omp_target_alloc (size_t size, int device_num)
{
  if (device_num == GOMP_DEVICE_HOST_FALLBACK)
    return malloc (size);

  if (device_num < 0)
    return NULL;

  struct gomp_device_descr *devicep = resolve_device (device_num);
  if (devicep == NULL)
    return NULL;

  if (!(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
      || devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return malloc (size);

  gomp_mutex_lock (&devicep->lock);
  void *ret = devicep->alloc_func (devicep->target_id, size);
  gomp_mutex_unlock (&devicep->lock);
  return ret;
}

/* OpenMP 4.5 omp_target_free: release memory obtained from
   omp_target_alloc on the same device.  NULL and invalid devices are
   silently ignored.  */

void
omp_target_free (void *device_ptr, int device_num)
{
  if (device_ptr == NULL)
    return;

  if (device_num == GOMP_DEVICE_HOST_FALLBACK)
    {
      free (device_ptr);
      return;
    }

  if (device_num < 0)
    return;

  struct gomp_device_descr *devicep = resolve_device (device_num);
  if (devicep == NULL)
    return;

  if (!(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
      || devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    {
      free (device_ptr);
      return;
    }

  gomp_mutex_lock (&devicep->lock);
  gomp_free_device_memory (devicep, device_ptr);
  gomp_mutex_unlock (&devicep->lock);
}

/* OpenMP 4.5 omp_target_is_present: return nonzero if PTR is mapped on
   DEVICE_NUM (always true for NULL, the host fallback and shared-memory
   devices).  */

int
omp_target_is_present (void *ptr, int device_num)
{
  if (ptr == NULL)
    return 1;

  if (device_num == GOMP_DEVICE_HOST_FALLBACK)
    return 1;

  if (device_num < 0)
    return 0;

  struct gomp_device_descr *devicep = resolve_device (device_num);
  if (devicep == NULL)
    return 0;

  if (!(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
      || devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return 1;

  gomp_mutex_lock (&devicep->lock);

  struct splay_tree_s *mem_map = &devicep->mem_map;
  struct splay_tree_key_s cur_node;

  /* Zero-length lookup: host_end == host_start.  */
  cur_node.host_start = (uintptr_t) ptr;
  cur_node.host_end = cur_node.host_start;
  splay_tree_key n = gomp_map_0len_lookup (mem_map, &cur_node);
  int ret = n != NULL;
  gomp_mutex_unlock (&devicep->lock);
  return ret;
}

/* OpenMP 4.5 omp_target_memcpy: copy LENGTH bytes between (possibly
   device-resident) buffers, with per-side byte offsets.  Returns 0 on
   success, EINVAL on failure.  */

int
omp_target_memcpy (void *dst, void *src, size_t length, size_t dst_offset,
		   size_t src_offset, int dst_device_num, int src_device_num)
{
struct gomp_device_descr *dst_devicep = NULL, *src_devicep = NULL; bool ret; if (dst_device_num != GOMP_DEVICE_HOST_FALLBACK) { if (dst_device_num < 0) return EINVAL; dst_devicep = resolve_device (dst_device_num); if (dst_devicep == NULL) return EINVAL; if (!(dst_devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400) || dst_devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM) dst_devicep = NULL; } if (src_device_num != GOMP_DEVICE_HOST_FALLBACK) { if (src_device_num < 0) return EINVAL; src_devicep = resolve_device (src_device_num); if (src_devicep == NULL) return EINVAL; if (!(src_devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400) || src_devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM) src_devicep = NULL; } if (src_devicep == NULL && dst_devicep == NULL) { memcpy ((char *) dst + dst_offset, (char *) src + src_offset, length); return 0; } if (src_devicep == NULL) { gomp_mutex_lock (&dst_devicep->lock); ret = dst_devicep->host2dev_func (dst_devicep->target_id, (char *) dst + dst_offset, (char *) src + src_offset, length); gomp_mutex_unlock (&dst_devicep->lock); return (ret ? 0 : EINVAL); } if (dst_devicep == NULL) { gomp_mutex_lock (&src_devicep->lock); ret = src_devicep->dev2host_func (src_devicep->target_id, (char *) dst + dst_offset, (char *) src + src_offset, length); gomp_mutex_unlock (&src_devicep->lock); return (ret ? 0 : EINVAL); } if (src_devicep == dst_devicep) { gomp_mutex_lock (&src_devicep->lock); ret = src_devicep->dev2dev_func (src_devicep->target_id, (char *) dst + dst_offset, (char *) src + src_offset, length); gomp_mutex_unlock (&src_devicep->lock); return (ret ? 
0 : EINVAL); } return EINVAL; } static int omp_target_memcpy_rect_worker (void *dst, void *src, size_t element_size, int num_dims, const size_t *volume, const size_t *dst_offsets, const size_t *src_offsets, const size_t *dst_dimensions, const size_t *src_dimensions, struct gomp_device_descr *dst_devicep, struct gomp_device_descr *src_devicep) { size_t dst_slice = element_size; size_t src_slice = element_size; size_t j, dst_off, src_off, length; int i, ret; if (num_dims == 1) { if (__builtin_mul_overflow (element_size, volume[0], &length) || __builtin_mul_overflow (element_size, dst_offsets[0], &dst_off) || __builtin_mul_overflow (element_size, src_offsets[0], &src_off)) return EINVAL; if (dst_devicep == NULL && src_devicep == NULL) { memcpy ((char *) dst + dst_off, (char *) src + src_off, length); ret = 1; } else if (src_devicep == NULL) ret = dst_devicep->host2dev_func (dst_devicep->target_id, (char *) dst + dst_off, (char *) src + src_off, length); else if (dst_devicep == NULL) ret = src_devicep->dev2host_func (src_devicep->target_id, (char *) dst + dst_off, (char *) src + src_off, length); else if (src_devicep == dst_devicep) ret = src_devicep->dev2dev_func (src_devicep->target_id, (char *) dst + dst_off, (char *) src + src_off, length); else ret = 0; return ret ? 0 : EINVAL; } /* FIXME: it would be nice to have some plugin function to handle num_dims == 2 and num_dims == 3 more efficiently. Larger ones can be handled in the generic recursion below, and for host-host it should be used even for any num_dims >= 2. 
*/ for (i = 1; i < num_dims; i++) if (__builtin_mul_overflow (dst_slice, dst_dimensions[i], &dst_slice) || __builtin_mul_overflow (src_slice, src_dimensions[i], &src_slice)) return EINVAL; if (__builtin_mul_overflow (dst_slice, dst_offsets[0], &dst_off) || __builtin_mul_overflow (src_slice, src_offsets[0], &src_off)) return EINVAL; for (j = 0; j < volume[0]; j++) { ret = omp_target_memcpy_rect_worker ((char *) dst + dst_off, (char *) src + src_off, element_size, num_dims - 1, volume + 1, dst_offsets + 1, src_offsets + 1, dst_dimensions + 1, src_dimensions + 1, dst_devicep, src_devicep); if (ret) return ret; dst_off += dst_slice; src_off += src_slice; } return 0; } int omp_target_memcpy_rect (void *dst, void *src, size_t element_size, int num_dims, const size_t *volume, const size_t *dst_offsets, const size_t *src_offsets, const size_t *dst_dimensions, const size_t *src_dimensions, int dst_device_num, int src_device_num) { struct gomp_device_descr *dst_devicep = NULL, *src_devicep = NULL; if (!dst && !src) return INT_MAX; if (dst_device_num != GOMP_DEVICE_HOST_FALLBACK) { if (dst_device_num < 0) return EINVAL; dst_devicep = resolve_device (dst_device_num); if (dst_devicep == NULL) return EINVAL; if (!(dst_devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400) || dst_devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM) dst_devicep = NULL; } if (src_device_num != GOMP_DEVICE_HOST_FALLBACK) { if (src_device_num < 0) return EINVAL; src_devicep = resolve_device (src_device_num); if (src_devicep == NULL) return EINVAL; if (!(src_devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400) || src_devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM) src_devicep = NULL; } if (src_devicep != NULL && dst_devicep != NULL && src_devicep != dst_devicep) return EINVAL; if (src_devicep) gomp_mutex_lock (&src_devicep->lock); else if (dst_devicep) gomp_mutex_lock (&dst_devicep->lock); int ret = omp_target_memcpy_rect_worker (dst, src, element_size, num_dims, volume, dst_offsets, src_offsets, 
dst_dimensions, src_dimensions,
					   dst_devicep, src_devicep);

  if (src_devicep)
    gomp_mutex_unlock (&src_devicep->lock);
  else if (dst_devicep)
    gomp_mutex_unlock (&dst_devicep->lock);

  return ret;
}

/* Record that HOST_PTR's range of SIZE bytes is backed by DEVICE_PTR +
   DEVICE_OFFSET on DEVICE_NUM, by inserting a pinned (infinite
   refcount) entry into the device's splay tree.  Returns 0 on success,
   EINVAL on error or if the range half-overlaps an existing mapping.  */

int
omp_target_associate_ptr (void *host_ptr, void *device_ptr, size_t size,
			  size_t device_offset, int device_num)
{
  if (device_num == GOMP_DEVICE_HOST_FALLBACK)
    return EINVAL;

  if (device_num < 0)
    return EINVAL;

  struct gomp_device_descr *devicep = resolve_device (device_num);
  if (devicep == NULL)
    return EINVAL;

  if (!(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
      || devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return EINVAL;

  gomp_mutex_lock (&devicep->lock);

  struct splay_tree_s *mem_map = &devicep->mem_map;
  struct splay_tree_key_s cur_node;
  int ret = EINVAL;

  cur_node.host_start = (uintptr_t) host_ptr;
  cur_node.host_end = cur_node.host_start + size;
  splay_tree_key n = gomp_map_lookup (mem_map, &cur_node);
  if (n)
    {
      /* Already mapped: succeed only if the existing mapping covers the
	 requested range and points at the same device address.  */
      if (n->tgt->tgt_start + n->tgt_offset
	  == (uintptr_t) device_ptr + device_offset
	  && n->host_start <= cur_node.host_start
	  && n->host_end >= cur_node.host_end)
	ret = 0;
    }
  else
    {
      /* Synthesize a target_mem_desc that owns no device memory
	 (tgt_start == tgt_end == 0, to_free == NULL) so unmapping code
	 never frees the user-provided device pointer.  */
      struct target_mem_desc *tgt = gomp_malloc (sizeof (*tgt));
      tgt->array = gomp_malloc (sizeof (*tgt->array));
      tgt->refcount = 1;
      tgt->tgt_start = 0;
      tgt->tgt_end = 0;
      tgt->to_free = NULL;
      tgt->prev = NULL;
      tgt->list_count = 0;
      tgt->device_descr = devicep;
      splay_tree_node array = tgt->array;
      splay_tree_key k = &array->key;
      k->host_start = cur_node.host_start;
      k->host_end = cur_node.host_end;
      k->tgt = tgt;
      k->tgt_offset = (uintptr_t) device_ptr + device_offset;
      /* Pinned: never removed by normal unmapping.  */
      k->refcount = REFCOUNT_INFINITY;
      array->left = NULL;
      array->right = NULL;
      splay_tree_insert (&devicep->mem_map, array);
      ret = 0;
    }
  gomp_mutex_unlock (&devicep->lock);
  return ret;
}

/* Undo omp_target_associate_ptr for PTR on DEVICE_NUM.  Only entries
   that were created by omp_target_associate_ptr (pinned, zero-based,
   nothing to free) are eligible.  Returns 0 on success, EINVAL
   otherwise.  */

int
omp_target_disassociate_ptr (void *ptr, int device_num)
{
  if (device_num == GOMP_DEVICE_HOST_FALLBACK)
    return EINVAL;

  if (device_num < 0)
    return EINVAL;

  struct gomp_device_descr *devicep = resolve_device (device_num);
  if (devicep == NULL)
    return EINVAL;

  if (!(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400))
    return EINVAL;

  gomp_mutex_lock (&devicep->lock);

  struct splay_tree_s *mem_map = &devicep->mem_map;
  struct splay_tree_key_s cur_node;
  int ret = EINVAL;

  cur_node.host_start = (uintptr_t) ptr;
  cur_node.host_end = cur_node.host_start;
  splay_tree_key n = gomp_map_lookup (mem_map, &cur_node);
  if (n
      && n->host_start == cur_node.host_start
      && n->refcount == REFCOUNT_INFINITY
      && n->tgt->tgt_start == 0
      && n->tgt->to_free == NULL
      && n->tgt->refcount == 1
      && n->tgt->list_count == 0)
    {
      splay_tree_remove (&devicep->mem_map, n);
      gomp_unmap_tgt (n->tgt);
      ret = 0;
    }

  gomp_mutex_unlock (&devicep->lock);
  return ret;
}

#ifdef PLUGIN_SUPPORT

/* This function tries to load a plugin for DEVICE.  Name of plugin
   is passed in PLUGIN_NAME.
   The handles of the found functions are stored in the corresponding
   fields of DEVICE.  The function returns TRUE on success and FALSE
   otherwise.  */

static bool
gomp_load_plugin_for_device (struct gomp_device_descr *device,
			     const char *plugin_name)
{
  const char *err = NULL, *last_missing = NULL;

  void *plugin_handle = dlopen (plugin_name, RTLD_LAZY);
  if (!plugin_handle)
    goto dl_fail;

  /* Check if all required functions are available in the plugin and store
     their handlers.  None of the symbols can legitimately be NULL,
     so we don't need to check dlerror all the time.  */
#define DLSYM(f)							\
  if (!(device->f##_func = dlsym (plugin_handle, "GOMP_OFFLOAD_" #f)))	\
    goto dl_fail
  /* Similar, but missing functions are not an error.  Return false if
     failed, true otherwise.  */
#define DLSYM_OPT(f, n)							\
  ((device->f##_func = dlsym (plugin_handle, "GOMP_OFFLOAD_" #n))	\
   || (last_missing = #n, 0))

  DLSYM (version);
  if (device->version_func () != GOMP_VERSION)
    {
      err = "plugin version mismatch";
      goto fail;
    }

  DLSYM (get_name);
  DLSYM (get_caps);
  DLSYM (get_type);
  DLSYM (get_num_devices);
  DLSYM (init_device);
  DLSYM (fini_device);
  DLSYM (load_image);
  DLSYM (unload_image);
  DLSYM (alloc);
  DLSYM (free);
  DLSYM (dev2host);
  DLSYM (host2dev);
  device->capabilities = device->get_caps_func ();
  if (device->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
    {
      DLSYM (run);
      DLSYM (async_run);
      DLSYM_OPT (can_run, can_run);
      DLSYM (dev2dev);
    }
  if (device->capabilities & GOMP_OFFLOAD_CAP_OPENACC_200)
    {
      if (!DLSYM_OPT (openacc.exec, openacc_exec)
	  || !DLSYM_OPT (openacc.register_async_cleanup,
			 openacc_register_async_cleanup)
	  || !DLSYM_OPT (openacc.async_test, openacc_async_test)
	  || !DLSYM_OPT (openacc.async_test_all, openacc_async_test_all)
	  || !DLSYM_OPT (openacc.async_wait, openacc_async_wait)
	  || !DLSYM_OPT (openacc.async_wait_async, openacc_async_wait_async)
	  || !DLSYM_OPT (openacc.async_wait_all, openacc_async_wait_all)
	  || !DLSYM_OPT (openacc.async_wait_all_async,
			 openacc_async_wait_all_async)
	  || !DLSYM_OPT (openacc.async_set_async, openacc_async_set_async)
	  || !DLSYM_OPT (openacc.create_thread_data,
			 openacc_create_thread_data)
	  || !DLSYM_OPT (openacc.destroy_thread_data,
			 openacc_destroy_thread_data))
	{
	  /* Require all the OpenACC handlers if we have
	     GOMP_OFFLOAD_CAP_OPENACC_200.  */
	  err = "plugin missing OpenACC handler function";
	  goto fail;
	}

      /* The CUDA-specific entry points are all-or-nothing.  */
      unsigned cuda = 0;
      cuda += DLSYM_OPT (openacc.cuda.get_current_device,
			 openacc_cuda_get_current_device);
      cuda += DLSYM_OPT (openacc.cuda.get_current_context,
			 openacc_cuda_get_current_context);
      cuda += DLSYM_OPT (openacc.cuda.get_stream, openacc_cuda_get_stream);
      cuda += DLSYM_OPT (openacc.cuda.set_stream, openacc_cuda_set_stream);
      if (cuda && cuda != 4)
	{
	  /* Make sure all the CUDA functions are there if any of them are.  */
	  err = "plugin missing OpenACC CUDA handler function";
	  goto fail;
	}
    }
#undef DLSYM
#undef DLSYM_OPT

  return 1;

 dl_fail:
  err = dlerror ();
 fail:
  gomp_error ("while loading %s: %s", plugin_name, err);
  if (last_missing)
    gomp_error ("missing function was %s", last_missing);
  if (plugin_handle)
    dlclose (plugin_handle);

  return 0;
}

/* This function finalizes all initialized devices.  */

static void
gomp_target_fini (void)
{
  int i;
  for (i = 0; i < num_devices; i++)
    {
      bool ret = true;
      struct gomp_device_descr *devicep = &devices[i];
      gomp_mutex_lock (&devicep->lock);
      if (devicep->state == GOMP_DEVICE_INITIALIZED)
	{
	  ret = devicep->fini_device_func (devicep->target_id);
	  devicep->state = GOMP_DEVICE_FINALIZED;
	}
      gomp_mutex_unlock (&devicep->lock);
      if (!ret)
	gomp_fatal ("device finalization failed");
    }
}

/* This function initializes the runtime needed for offloading.
   It parses the list of offload targets and tries to load the plugins for
   these targets.  On return, the variables NUM_DEVICES and NUM_DEVICES_OPENMP
   will be set, and the array DEVICES initialized, containing descriptors for
   corresponding devices, first the GOMP_OFFLOAD_CAP_OPENMP_400 ones, follows
   by the others.  */

static void
gomp_target_init (void)
{
  const char *prefix = "libgomp-plugin-";
  const char *suffix = SONAME_SUFFIX (1);
  const char *cur, *next;
  char *plugin_name;
  int i, new_num_devices;

  num_devices = 0;
  devices = NULL;

  cur = OFFLOAD_TARGETS;
  if (*cur)
    do
      {
	struct gomp_device_descr current_device;

	/* OFFLOAD_TARGETS is a comma-separated list; build the plugin
	   file name "<prefix><target><suffix>" for this entry.  */
	next = strchr (cur, ',');

	plugin_name = (char *) malloc (1 + (next ? next - cur : strlen (cur))
				       + strlen (prefix) + strlen (suffix));
	if (!plugin_name)
	  {
	    num_devices = 0;
	    break;
	  }

	strcpy (plugin_name, prefix);
	strncat (plugin_name, cur, next ? next - cur : strlen (cur));
	strcat (plugin_name, suffix);

	if (gomp_load_plugin_for_device (&current_device, plugin_name))
	  {
	    new_num_devices = current_device.get_num_devices_func ();
	    if (new_num_devices >= 1)
	      {
		/* Augment DEVICES and NUM_DEVICES.  */

		/* NOTE(review): on realloc failure the old DEVICES block
		   is not freed here (devices still points at it via the
		   overwritten variable) — leak on OOM; verify intent.  */
		devices = realloc (devices, (num_devices + new_num_devices)
				   * sizeof (struct gomp_device_descr));
		if (!devices)
		  {
		    num_devices = 0;
		    free (plugin_name);
		    break;
		  }

		current_device.name = current_device.get_name_func ();
		/* current_device.capabilities has already been set.  */
		current_device.type = current_device.get_type_func ();
		current_device.mem_map.root = NULL;
		current_device.state = GOMP_DEVICE_UNINITIALIZED;
		current_device.openacc.data_environ = NULL;
		for (i = 0; i < new_num_devices; i++)
		  {
		    current_device.target_id = i;
		    devices[num_devices] = current_device;
		    gomp_mutex_init (&devices[num_devices].lock);
		    num_devices++;
		  }
	      }
	  }

	free (plugin_name);
	cur = next + 1;
      }
    while (next);

  /* In DEVICES, sort the GOMP_OFFLOAD_CAP_OPENMP_400 ones first, and set
     NUM_DEVICES_OPENMP.  */
  struct gomp_device_descr *devices_s
    = malloc (num_devices * sizeof (struct gomp_device_descr));
  if (!devices_s)
    {
      /* num_devices == 0 makes every loop below a no-op, so the NULL
	 devices_s is never dereferenced.  */
      num_devices = 0;
      free (devices);
      devices = NULL;
    }
  num_devices_openmp = 0;
  for (i = 0; i < num_devices; i++)
    if (devices[i].capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
      devices_s[num_devices_openmp++] = devices[i];
  int num_devices_after_openmp = num_devices_openmp;
  for (i = 0; i < num_devices; i++)
    if (!(devices[i].capabilities & GOMP_OFFLOAD_CAP_OPENMP_400))
      devices_s[num_devices_after_openmp++] = devices[i];
  free (devices);
  devices = devices_s;

  for (i = 0; i < num_devices; i++)
    {
      /* The 'devices' array can be moved (by the realloc call) until we have
	 found all the plugins, so registering with the OpenACC runtime (which
	 takes a copy of the pointer argument) must be delayed until now.  */
      if (devices[i].capabilities & GOMP_OFFLOAD_CAP_OPENACC_200)
	goacc_register (&devices[i]);
    }

  if (atexit (gomp_target_fini) != 0)
    gomp_fatal ("atexit failed");
}

#else /* PLUGIN_SUPPORT */
/* If dlfcn.h is unavailable we always fallback to host execution.
   GOMP_target* routines are just stubs for this case.  */
static void
gomp_target_init (void)
{
}
#endif /* PLUGIN_SUPPORT */
/* sc35.c */
/*TODO in future s - Non-equilibrium candidate moves - check scaling of particles of different sizes - should scale with contact area! - cell list - divide simulation box in cells where particles interact with each other and outside is definitely 0 - safe time better scaling with system size, possibly long spherovylinders could be in several celles to keep good scaling - better cluster algorithm - put in wang-landau - cluster list work for spherocylinders only now */ /*------------------------------------------------------------------------------ Version 3.5 - linear bond at spherocylinders, where second spherocilinder is harmonicaly attached to a point that is in distance of bondlength from the first spherocylinder and it follows the direction of spherocylinder - bonded particles belong to the same cluster - print energy at statistical reports - have particles of different lengths - interaction scaling back to v1+v2 (no addition of 1.0) - more physical */ /*------------------------------------------------------------------------------ Version 3.4 - New handling of the option file - reaction coordinate radius around z axis for a pore calculations - reaction coordinate as number of particles in contact (defined by distance of CMs) - 2D Wang-Landau method - New Wang-Landau coordinate - radius pore in vesicle around begining of xy plane - New models TPSC, TCPSC, TCHPSC, TCHCPSC- models with two patches note that switch function on sides of patch are linear in cos angle not in angle as a results two patches with overlaping sides do not compensate easily to a flat profile - FIX chirality was doubled (angle twice as large) - Added posibility of exluded interactions [EXCLUDE] in topology file - MPI replica exchange with different temperatures and pressure (paraltemp paralpress) input configuration is #{number of process}config.init, if it does not exist config.init is used each replica is with different random seed = seed+mpirank - config.init can look like movie snapshot 
- MPI exchange with Wang-Landau - added angular interaction between neighboring spherocylinders (in chain) angle1 is angle between sc directions and angle2 ins angle between the patches */ /*------------------------------------------------------------------------------- Version 3.3 -external potantial can be added as a part of topology - it can be hard or attractive wall */ /** * Changes made by Noah S. Bieler and Robert Vacha: * * New version 3.2 * * - The length has now to be specified in the topology file, but they are not * allowed to differ from each other. The option file shall no longer contain * a length option. * - The particles can now switch their type based on the chemical potential * delta_mu (= energy difference from state 2 to state 1). * - For that a new option was introduced: Average attempts per sweep to switch * a type. * - A lot of variables are now combined in either topo, sim or conf. The rule * should be: * > topo: Everything that belongs to the topology and that should not change * during the game. * > sim: Options and stuff, that has to do with the simulation. (Maybe the * current target and so should be saved in there as well) * > conf: What changes every step concerning the particles and the box or * in other words: what has been read from conf.init * - added a cluster determing routine => sim->clusterlist + sim->clusters * - added macros for TRUE and FALSE * - Added Option for the random seed * - Basic Neighbour list implemented * - New types: chiral CPSC (CHCPSC) and chiral PSC (CHPSC) and their interactions */ /*-------------------------------------------------------------------------------- sc31.c Patchy Spherocylinder Version 3.1 Wang-Landau method of free energy calculations It is set in options file as: O = none, 1 = z-distance of 1st paticle from system CM, 2 = hole in xyplane of SCA = membrane hole It reads a file wl.dat and write wl-new at the end. 
There is value of alpha at the first line and then there are three columns: 1- order parameter, 2- weights, 3- histogram Interaction of spherocylinders is scaled based on the volume of attractive patch, the unit of one is that two spheres of diameter sigma =1.0 are attracting each other by 1.0. Using this in interaction among lipids and spherocylinders should be consistent. Start up configuration "config.init" file has a box size at the first line now. (I tested performance: compilation with optimization -O2 speed up 10% rest has negligible effect including usage of static arrays instead of dynamic most of the time consumes paire function. 6,519,638,177 :simulate 6,492,411,300 :energyone 5,705,685,593 :paire 542,561,887 :bondenergy 489,463,361 :eattractive11 450,443,970 :image 115,126,519 :erepulsive */ /* -------------------------------------------------------------------------------- sc3.c Patchy Spherocylinder Version 3.0 Beads were added to the particle list. bead(10) - repulsive bead(11) - isotropocally attractive -It is necessary to provide also a topology file (top.init) -Particles are placed in chains according to the topology order including connections -Particle arryas are allocated dynamicly on heap now -dispacement and rotation are optimized for highest RMSD performace -NPT ensemble with isotropic and anisotropic couplings, in pressure moves all particles are rescaled with their center (chains are not rescaled with CM) 0 - anisotropic coupling, 1 - isotropic coupling, 2 - isotropic in xy z=const bead types and their interactions repulsive(10) purely repulsive shpere with WCA potential on closest distance parameters: Patch repulsion sigma - defined where repulsion reaches zero isotropic(11) - isotropic cos^2 potential is acting isotropicaly dependent only on closest distance between obejcts. Parameters: distance of attractivity (should be at least sigma*2^(1/6)) defines how far is attraction constant -e. 
After this distance follows switch length on which attraction goes to zero as cos^2. Rest as repulsive model. sc2.c Patchy Spherocylinder Version 2.0 It is possible to make chains of spherocylinders that are connected through hemispherical caps by harmonic bond. There are two parameters eq distance and strength of harmonic spring, note that units are in 1 kT/e, the MC strength of bond is changing with parameter temperature.. Patchy Spherocylinder Version 1.0 Includes diffferent types of possible interactions: repulsive(0) - purely repulsive spherocylinder with WCA potential on closest distance. parameters: Patch repulsion sigma - defined where repulsion reaches zero. isotropic(1) - isotropic cos^2 potential is acting isotropicaly dependent only on closest distance between spherocylinders. Parameters: distance of patch, Interaction distance of patch (should be at least sigma*2^(1/6)) defines how far is attraction constant -e. After this distance follows Switch length on which attraction goes to zero as cos^2. Rest as repulsive model. patchy(2) - Attractive potential in limited to an angular wedge on spherocylinder. Patch goes all the way through, making also hemispherical caps on end attractive. Parameters:Anglular part has a parameter defining it size "Angular size of patch (degrees)" and witdh of switch function "Angular switch off of patch (degrees)" on which attraction reaches zero - it is a linear function. Rest as isotropic model. cylindrical(3) - Attractive potential in limited to an angular wedge on cylindrical part of spherocylinders. The hemispherical caps on ends are repulsive. Rest as patchy model. 
Note particles are inside numbered from 0, there is prealocated size of particles MAXN because in future there can be grand canonical ensamble and number of particles may vary Follows mc of hard wall spherocylinder version 7 by Mark Miller -description below sc.c Version 1 Performs basic constant volume MC simulation of hard spherocylinders with rigid cuboidal boundary conditions. Run parameters are read in from the file "options". The template for this file appears at the end of the code. The values must be inserted before the colons. The initial configuration is read from the file "config.init". The first line contain size of box The format for the file is nine columns: three for the positions and three for the direction vector and three for direction of pathc. The direction vectors are normalised after being read in. The configuration is checked for particle overlaps. The unit of length is taken as the spherocylinder diameter. Hence the ratio L/D is equal to the length of the cylinder. Order parameters for nematic and smectic order are evaluated. The nematic order parameter is related to the coefficient of the quadratic term in the Legendre expansion of the orientational distribution function. Any smectic order is assumed to be directed along the z axis, and is detected by the coefficients of the Fourier expansion of the position distribution function. MM 12.vii.01 .................................................................................. Version 2 The aspect ratio of the box may now fluctuate, keeping the volume constant. Two new parameters are required in the options file to specify the average number of attempted shape changes per sweep, and the initial maximum trial change in a box dimension. 
Shape changes are made by picking one of the three box lengths at random, making a random change, evenly distributed between plus and minus a finite interval, choosing a second direction and doing the same, then determining the new length in the remaining direction from the condition of constant volume. The step-size equilibration period is now split into three parts: displacement, rotation, and shape change. The most important change to the code is that the particle coordinates are now stored as fractions of the box dimensions. However, input and output configurations are still communicated in units of the cylinder diameter, D=1. Note that the displacement maximum step size is now specified as a fraction of the box length, not as an absolute distance. MM 18.vii.01 .................................................................................. Version 3 Constant pressure MC. The volume may fluctuate. Volume changes are attempted by altering just one box length at a time, chosen at random. The running average of the density is calculated and reported. MM 24.vii.01 .................................................................................. Version 7 The composite translation-plus-rotation moves have been split into separate move types, each of which is attempted with equal probability. This enables acceptance ratios to be accumulated separately for these degrees of freedom, so that maximum step sizes can be adjusted more sensibly. A few other things have been tidied up, such as defining structures for the book-keeping of statistics and acceptance ratios. 
MM 9.v.02
--------------------------------------------------------------------------------*/

#ifndef _GNU_SOURCE
# define _GNU_SOURCE
#endif

#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <stdbool.h>

#ifdef MACOS
# include "getline.h"
#endif
#ifdef MPI
# include <mpi.h>
#endif

/* Macros for DEBUG messages.
   Each expands to a single statement (do { } while (0)) so that a call
   like "if (cond) DEBUG(...);" guards the whole message, not just the
   first fprintf, and does not break an attached else branch.  */
#ifdef DEBUGGING_INIT
#define DEBUG_INIT(...) \
    do { fprintf(stderr, "DB in INIT: "); fprintf(stderr, __VA_ARGS__); \
         fprintf(stderr, "\n"); fflush(stderr); } while (0)
#else
#define DEBUG_INIT(...)
#endif
#ifdef DEBUGGING_SIM
#define DEBUG_SIM(...) \
    do { fprintf(stderr, "DB in SIM: "); fprintf(stderr, __VA_ARGS__); \
         fprintf(stderr, "\n"); fflush(stderr); } while (0)
#else
#define DEBUG_SIM(...)
#endif
#ifdef DEBUGGING
#define DEBUG(...) \
    do { fprintf(stderr, "DB: "); fprintf(stderr, __VA_ARGS__); \
         fprintf(stderr, "\n"); fflush(stderr); } while (0)
#else
#define DEBUG(...)
#endif
/* End of DEBUG macros */

/* With pairlist ? */
#define WITH_PAIRLIST

/* Boolean Macros */
#define BOOL int
#define TRUE 1
#define FALSE 0
/* End of Boolean Macros */

#define MAXF 20          /* Maximum number of Fourier terms */
#define MAXN 14000       /* Maximum number of particles */
#define MAXCHL 10        /* Maximum length of chain */
#define ZEROTOL 1.0e-12  /* Dot products below ZEROTOL are deemed zero */
#define ZEROTOL2 1.0e-8  /* numbers below ZEROTOL are deemed zero */
#define PI 3.141592653589793238462643383279      /* pi */
#define PIH 1.57079632679489661923132169163975   /* pi half*/

/* Particle types.  Values are parenthesized so the macros stay correct
   inside larger arithmetic expressions.  */
#define SC 10            /*spherocylinder*/
#define SCN (SC+0)       /*spherocylinder non-attractive*/
#define SCA (SC+1)       /*spherocylinder isotropicaly attractive*/
#define PSC (SC+2)       /*spherocylinder with patchy attraction*/
#define CPSC (SC+3)      /*spherocylinder with cylindrical patchy attraction*/
#define CHPSC (SC+4)     /* chiral psc */
#define CHCPSC (SC+5)    /* chiral cpsc */
#define TPSC (SC+6)      /*spherocylinder with two patches*/
#define TCPSC (SC+7)     /*spherocylinder with two cylindrical patches*/
#define TCHPSC (SC+8)    /* chiral 2psc */
#define TCHCPSC (SC+9)   /* chiral 2cpsc */
#define SP 30            /*sphere - should be over all spherocylinders*/
#define SPN (SP+0)       /* sphere non-attractive*/
#define SPA (SP+1)       /* sphere isotropically attractive*/

#define MAXT 30          /* Maximum number of types we have*/
#define MAXMT 100        /* Maximum number of molecular types */

/*Reading topology*/
#define SMSTR 64         /* Small string length */
#define STRLEN 400       /* maximum length of line*/
#define CONTINUE '\\'    /* symbol for line continue*/
#define COMMENTSIGN '#'  /* symbol for comment*/
#define OPENKEY '['      /* starting sign for keyword*/
#define CLOSEKEY ']'     /* ending sign for keyword*/
#define SEPARATOR ':'    /* sign for separator*/
#define OPENMOL '{'      /* starting sign for molecules*/
#define CLOSEMOL '}'     /* ending sign for molecules*/
#define BOXSEP 'x'       /* extraction of box*/

/* Wang Landau method */
#define WL_GERR 0.0001       /* Max roughnes in histogram */
#define WL_ALPHATOL 0.000001 /* Convergence criteria for detailed balance */
#define WL_MINHIST 1000      /* Minimum histogram sampling for considering roughness */
#define WL_ZERO 0.000000000000 /* Zero for histogram with current weights*/
#define WL_CONTACTS 36.0     /* Square distance under which are particles in contact */

/* Math.  Arguments are fully parenthesized so the macros are safe with
   compound expressions such as ROUND(x + y).  */
#define DOT(a,b) ((a).x * (b).x + (a).y * (b).y + (a).z * (b).z) /* Dot product */
#define AVER(a,b) (((a)+(b))*0.5)  /* Arithmetic average*/
/* Round double to nearest integer value (half away from zero).
   Fixed: the previous definition had an unparenthesized argument and a
   trailing ';', which broke expression use and if/else call sites.  */
#define ROUND(a) ((a) > 0.0 ? floor((a) + 0.5) : ceil((a) - 0.5))
#define PMONE(a) (1 - 2 * (a))     /* Takes 1 or 0, return +-1 */

/* Acceptance ratio */
#define RATIO(a) ( ((a).acc+(a).rej) > 0 ? 1.0*(a).acc/((a).acc+(a).rej) : 0.0 )

/* Fold a coordinate into [0,1); integral part is stored in b.  */
#define INBOX(a,b) ( (a) > 0 ? modf((a),&(b)) : modf((a),&(b))+1 )

/*................................................................
Structure definitions */

struct vector {          /* Define a 3D vector structure */
    double x;
    double y;
    double z;
};

struct quat {            /* Define a quaternion structure */
    double w;
    double x;
    double y;
    double z;
};

struct particles {       /* Define a particle */
    struct vector pos;           /* Position vector */
    struct vector dir;           /* Unit direction vector of axis */
    struct vector patchdir[2];   /* Vector defining orientation of patch */
    struct vector patchsides[4]; /* Vector defining sides of patch */
    struct vector chdir[2];      /* Direction for chirality - keep in memory to increase speed */
    long chaint;                 /* Chain type*/
    long chainn;                 /* Chain number*/
    int type;                    /* Type of the particle */
    int switchtype;              /* With which kind of particle do you want to switch?*/
    double delta_mu;             /* Chemical potential for the switch */
    int switched;                /* 0: in initial state; 1: in the switched state */
};

struct ia_param {        /* Contains properties and parameters of particle types */
    char name[SMSTR];        /* The name of the particle type */
    char other_name[SMSTR];  /* The name of the particle type */
    int geotype[2];          /* The geometrical type: spherocylinder (0-repulsive, 1-isotropic,
                                2-patchy, 3-cylindrical) or sphere (0-repulsive, 1-isotropic) */
    double sigma;            /* Repulsion wca*/
    double epsilon;          /* Repulsion strength*/
    double pdis;             /* Interaction distance of patch */
    double pswitch;          /* Switch of distance of patch */
    double pangl[4];         /* angular size of patch as was specified in input */
    double panglsw[4];       /* angular size of patchswitch as was specified in input */
    double pcangl[4];        /* cosine of half size angle - rotation from patch direction to side */
    double pcanglsw[4];      /* cosine of half size angle plus switch - rotation from patch direction to side */
    double rcut;             /* Cutoff for attraction */
    double rcutwca;          /* Cutoff for repulsion*/
    double pcoshalfi[4];     /* Cosine of half angle going to side of interaction */
    double psinhalfi[4];     /* Sine of half angle going to side of interaction - useful for quaternion rotation */
    double csecpatchrot[2];  /* Cosine of Rotation of second patches in 2psc models*/
    double ssecpatchrot[2];  /* Sine of Rotation of second patches in 2psc models*/
    double volume;           /* Volume of particle for geometrical center calculations*/
    double pvolscale;        /* Scale of patch volume size*/
    double len[2];           /* Length of the PSC */
    double half_len[2];      /* Half length of the PSC */
    double chiral_cos[2];    /* Contains the cosinus for the chiral rotation of the patch */
    double chiral_sin[2];    /* Contains the sinus for the chiral rotation of the patch */
};

struct interacts {       /* Parameters passed to functions of interaction calculation */
    double dist;                /* closest distance */
    struct vector distvec;      /* vector of closest distance */
    struct particles * part1;   /* particle 1 */
    struct particles * part2;   /* particle 2 */
    struct vector box;          /* box size */
    struct ia_param * param;    /* interaction parameters */
    struct vector r_cm;         /* vector connecting center of masses */
    double distcm;              /* distance between center of masses */
    double dotrcm;              /* square size of r_cm*/
    double contt;               /* closest point on spherocylinder to sphere */
};

struct chainparams {     /*Parameters for inner interaction in chains*/
    double bond1eq;      /* Equilibrium distance of harmonic bond between nearest neighbours*/
    double bond1c;       /* Spring constant for harmonic bond between nearest neighbours*/
    double bond2eq;      /* Equilibrium distance of harmonic bond between second nearest neighbours*/
    double bond2c;       /* Spring constant for harmonic bond between second nearest neighbours*/
    double bonddeq;      /* Equilibrium distance of directional harmonic bond between the nearest neighbours*/
    double bonddc;       /* Spring constant for directional harmonic bond between the nearest neighbours*/
    double angle1eq;     /* Equilibrium angle between two spherocylinders - nearest neighbours*/
    double angle1c;      /* Spring constant angle between two spherocylinders - nearest neighbours*/
    double angle2eq;     /* Equilibrium angle between two spherocylinder patches - nearest neighbours*/
    double angle2c;      /* Spring constant for angle between two spherocylinder patches - nearest neighbours*/
};

struct molecule {        /* This structure is for io only */
    char * name;         /* The name of the molecule */
    long * type;         /* The type of the particle */
    long * switchtype;   /* The switchtype of the particle */
    double * delta_mu;   /* The chemical potential for the switch */
};

struct disp {            /* Define step size and acceptance ratio statistics */
    double mx;           /* Maximum value displacement, cos(angle), etc.  */
    double angle;        /* Maximum angle, since in .mx cos(angle) is saved */
    long acc;            /* Number of accepted steps */
    long rej;            /* Number of rejected steps */
    double oldrmsd;      /* Averaged mx value in previous equilibration round */
    double oldmx;        /* Change in mx in last equilibrium step */
};

/* NOTE(review): the tag "stat" collides with POSIX struct stat if
   <sys/stat.h> is ever included - confirm before adding such includes.  */
struct stat {            /* Define statistics counters */
    double sum;
    double sum2;
    long samples;
    double mean;
    double rms;
};

struct meshs {           /* Mesh for hole order parameter */
    int dim[2];          /* Mesh dimensions */
    int *data;           /* Mesh data */
    int *tmp;            /* temporary list for hole search */
};

struct wls {             /* Wang landau method (wl) */
    double *weights;     /* Array of weights for wl method */
    long *hist;          /* Array of histogram for wl method */
    long length[2];      /* Length of above arrays */
    double dorder[2];    /* Increments of order parameter */
    double minorder[2];  /* Minimum order parameter */
    double alpha;        /* Current modifier of weights */
    long currorder[2];   /* Value of current order parameter*/
    long neworder[2];    /* wl order parameter in new step */
    long max;            /* wl maximum of histogram */
    long min;            /* wl minimum of histogram */
    double wmin;         /* weights minimum */
    int wlmdim;          /* Dimensionality of wang landau */
    int wlmtype;         /* Atom type for the Wang landau method (wl) */
    double wl_meshsize;  /* Size of mesh bin for hole order parameter*/
    struct meshs mesh;       /* Mesh for hole order */
    struct meshs origmesh;   /* Mesh store for rejected moves */
    long * radiushole;       /* Array for hole radius around origin */
    long * radiusholeold;    /* Array for hole radius around origin - bigmove */
    long radiusholemax;      /* Size of array for hole radius*/
    long partincontact;      /* Number of particles in contact */
    long partincontactold;   /* Number of particles in contact - old for move*/
};

struct pairs {           /* The structure holding the particle numbers of the pairs and the number of pairs */
    long num_pairs;      /* The number of pairs */
    long * pairs;        /* The particle numbers of the pairs */
};

struct pairlist {        /* I think, this is done too complicated: just sim->pairs[npart] should be enough */
    struct pairs * list; /* contains the pairlist of all particles */
};

struct cluster {         /* contains all the particles of one cluster */
    long npart;
    long * particles;
};

struct exters {
    BOOL exist;              /* existence of external potential*/
    double thickness;        /* external wall thickness*/
    double epsilon;          /* depth of attraction*/
    double attraction;       /* distance of attraction*/
    double sqmaxcut;         /* distance when nothing can interact*/
    struct ia_param interactions[MAXT]; /* Interaction parameters with particle types generated from above params*/
};

struct topo {            /* It would be nice, if this struct would contain all the topo stuff in the end*/
    long * switchlist;   /* List containing the number of all the particles with switchtypes */
    long n_switch_part;  /* number of particles with switchtype */
    double sqmaxcut;     /* square of distance over which even spherocylinders cannot interact (distance between CM) */
    double maxcut;       /* distance over which even spherocylinders cannot interact (distance between CM) */
    long conlist[MAXN][4];        /* Connectivity list, we have connection to tail and head and second neighbours so far*/
    long chainlist[MAXN][MAXCHL]; /* List of chains*/
    long chainnum;                /* Number of chains */
    struct chainparams chainparam[MAXMT];   /* parameters for chains */
    struct ia_param ia_params[MAXT][MAXT];  /* parametrization of particles for all interactions*/
    long npart;          /* Number of particles */
    struct exters exter; /* external potential - wall */
};

struct sim {             /*
Should contain mostly all the simulation options and variables that can change in every step. */
    double press;               /* Pressure */
    double paralpress;          /* Parallel pressure for replica exchange */
    double dpress;              /* Pressure change for replica exchange */
    double shave;               /* Average number of volume changes to attempt per sweep */
    double shprob;              /* Probability of attempting a volume change */
    double chainprob;           /* Average number of chain move attempts per sweep */
    double switchprob;          /* Average number of type switch attempts per sweep */
    int pairlist_update;        /* Number of sweeps per updating the pairlist */
    double temper;              /* Temperature */
    double paraltemper;         /* Temperature for parallel tempering */
    double dtemp;               /* Temperature step */
    int ptype;                  /* Type of pressure coupling */
    long adjust;                /* Number of sweeps between step size adjustments */
    long movie;                 /* Number of sweeps between movie frames */
    long nequil;                /* Number of equilibration sweeps */
    long nsweeps;               /* Number of production sweeps */
    long paramfrq;              /* Number of sweeps between order parameter samples */
    long report;                /* Number of sweeps between statistics reports */
    // long terms;              /* Number of Fourier terms as smectic order parameters */
    long nrepchange;            /* Number of sweeps between replica exchanges */
    int wlm[2];                 /* Wang landau method (wl) */
    struct disp edge;           /* Maximum box length change and statistics */
    struct disp rot[MAXT];      /* Maximum rotation and statistics */
    struct disp trans[MAXT];    /* Maximum translation and statistics */
    struct disp chainm[MAXMT];  /* Maximum translation for chain and statistics */
    struct disp chainr[MAXMT];  /* Maximum rotation for chain and statistics */
    struct disp mpiexch;        /* MPI statistics */
    struct pairs * pairlist;    /* The pairlist */
    long write_cluster;         /* Number of sweeps per writing out cluster info */
    long * clusterlist;         /* clusterlist[i] = cluster index of particle i */
    struct cluster * clusters;  /* information about the single clusters */
    double *clustersenergy;     /* list of energies of clusters */
    long num_cluster;           /* number of single clusters */
    long * clusterstat;         /* Statistics about the size of cluster */
    long max_clust;             /* maximal clustersize */
    struct wls wl;              /* Wang landau data */
    int mpirank;                /* MPI number for given process */
    int mpinprocs;              /* MPI number of processes */
};

/* Holds the type of a variable in struct option. */
typedef enum {
    Int, Int2, Long, Double
} Type;

/* For reading in the options. */
typedef struct {
    char *id;   /* The name of the value in the option file */
    Type type;  /* The type (int, double or long) */
    BOOL set;   /* Whether the variable has been set */
    void *var;  /* The variable */
} Option;

/* Configuration of the system. */
struct conf{
    struct particles * particle;  /* All particles */
    struct vector box;            /* Box size */
    double sysvolume;             /* Something like total mass */
    struct vector syscm;          /* System center of mass */
};

/* Fixed-size buffers holding all input/output file names. */
struct filenames {
    /* input files */
    char configurationinfile[30];
    char topologyfile[30];
    char optionsfile[30];
    char wlinfile[30];
    /* output files */
    char configurationoutfile[30];
    char moviefile[30];
    char wloutfile[30];
    char statfile[30];
    char clusterfile[30];
    char clusterstatfile[30];
    char energyfile[30];
};

/* Extra type for mpi communication. */
struct mpiexchangedata{
    struct vector box;    /* box of configuration */
    double energy;        /* energy of configuration */
    double volume;        /* volume of configuration */
    int accepted;         /* bool if accepted */
    struct vector syscm;  /* system CM of configuration */
    long radiusholemax;   /* size of array for WL */
    long wl_order[2];     /* wang-landau order parameter */
};

#ifdef MPI
MPI_Datatype MPI_vector, MPI_Particle, MPI_exchange;
#endif

const struct stat nullstat = {0.0, 0.0, 0, 0.0, 0.0};

long seed = 6;  /* Seed for random number generator */

/*..............................................................................*/

int main(int argc, char **argv)
{
    DEBUG("start");
    FILE *outfile,*mov;  /* Handle for writing configuration */
    /* Table of pair interaction functions indexed by the two particle types. */
    double (* intfce[MAXT][MAXT])(struct interacts *); /*array
of interaction functions*/ struct topo topo; /* will maybe contain all the topo stuff in future */ struct sim sim; /* Should contain the simulation options. */ struct conf conf; /* Should contain fast changing particle and box(?) information */ struct filenames files; int memoryalloc(struct conf * conf); int memorydealloc(struct conf * conf, struct topo * topo, struct sim * sim); void read_options(struct sim* sim, char filename[30]); void init_top(struct topo *, struct conf * conf, struct sim * sim, char filename[30]); void init_config(struct topo * topo, struct conf * conf, struct sim * sim, char filename[30]); void init_intfce(double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo); void draw(FILE *, struct conf * conf, struct topo * topo); void printeqstat(struct disp *, double, int); void simulate(long nsweeps, long adjust, long paramfrq, long report, double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo, struct sim * sim, struct conf * conf, struct filenames *files); void init_pairlist(struct topo * topo, struct sim * sim); void gen_pairlist(struct topo * topo, struct sim * sim, struct conf * conf); void print_pairlist(FILE * stream, struct sim * sim, struct topo * topo); int gen_clusterlist(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *) ); int print_clusterlist(FILE * stream, BOOL decor, struct topo * topo, struct sim * sim, struct conf * conf); int print_clusters(FILE * stream, BOOL decor, struct sim * sim); int print_clusterstat(FILE * stream, BOOL decor, struct sim * sim); int sort_clusterlist(struct topo * topo, struct sim * sim); printf ("\nPatchy Spherocylinders version 3.5 "); sprintf(files.configurationinfile, "config.init"); sprintf(files.configurationoutfile, "config.last"); sprintf(files.optionsfile, "options"); sprintf(files.topologyfile, "top.init"); sprintf(files.moviefile, "movie"); sprintf(files.wlinfile, "wl.dat"); sprintf(files.wloutfile, 
"wl-new.dat"); sprintf(files.statfile, "stat.dat"); sprintf(files.clusterfile, "cluster.dat"); sprintf(files.clusterstatfile, "cluster_stat.dat"); sprintf(files.energyfile, "energy.dat"); #ifdef MPI FILE *infile; printf(" MPI version"); MPI_Init(&argc,&argv); MPI_Comm_size(MPI_COMM_WORLD, &(sim.mpinprocs) ); MPI_Comm_rank(MPI_COMM_WORLD, &(sim.mpirank) ); sprintf(files.configurationoutfile, "%dconfig.last", sim.mpirank); sprintf(files.moviefile, "%dmovie", sim.mpirank); sprintf(files.wloutfile, "%dwl-new.dat", sim.mpirank); sprintf(files.clusterfile, "%dcluster.dat", sim.mpirank); sprintf(files.clusterstatfile, "%dcluster_stat.dat", sim.mpirank); sprintf(files.energyfile, "%denergy.dat", sim.mpirank); sprintf(files.statfile, "%dstat.dat", sim.mpirank); /*test if there is a specific input configuration for mpi run*/ sprintf(files.configurationinfile, "%dconfig.init", sim.mpirank); infile = fopen(files.configurationinfile, "r"); if (infile != NULL) fclose (infile); else sprintf(files.configurationinfile, "config.init"); /*test if there is a specific input wang-landau for mpi run*/ sprintf(files.wlinfile, "%dwl.dat", sim.mpirank); infile = fopen(files.wlinfile, "r"); if (infile != NULL) fclose (infile); else sprintf(files.wlinfile, "wl.dat"); #endif printf ("\n-------------------------------------\n"); printf ("Reading options...\n"); read_options(&sim,files.optionsfile); init_top(&topo, &conf, &sim,files.topologyfile); if (topo.chainnum ==0) { /*no chain make the probability of moving them 0*/ if (sim.chainprob > 0) printf ("No chains... 
chain move probability set to 0.\n"); sim.chainprob = 0; } printf ("\nReading configuration...\n"); init_config(&topo, &conf, &sim, files.configurationinfile); printf ("Equilibration of maximum step sizes: %ld sweeps\n", sim.nequil/2); fflush (stdout); if ( sim.wlm[0] > 0 ) { outfile = fopen(files.wlinfile, "r"); if (outfile == NULL) { printf ("ERROR: Cannot open file for Wang-Landau method (%s).\n",files.wlinfile); memorydealloc(&conf, &topo, &sim); exit(1); } fclose (outfile); } /* Empty movie file */ mov = fopen("movie", "w"); fclose (mov); printf ("\nInitializing energy functions...\n"); init_intfce(intfce, &topo); if (sim.pairlist_update) { init_pairlist(&topo, &sim); } if (sim.nequil) { printf("\nStart equilibration...\n"); simulate(sim.nequil/2, sim.adjust, 0, 0, intfce, &topo, &sim, &conf,&files); simulate(sim.nequil/2, 0, 0, 0, intfce, &topo, &sim, &conf,&files); printf (" Equilibrated maximum displacement / acceptance ratio: \n"); printeqstat(sim.trans,2.0,MAXT); printf (" Equilibrated maximum rotation / acceptance ratio: \n"); printeqstat(sim.rot,1.0,MAXT); printf (" Equilibrated maximum box length change / acceptance ratio: \n"); printf (" %.6le / %.6le\n", sim.edge.mx/2.0,RATIO(sim.edge)); printf (" Equilibrated maximum displacement of chain / acceptance ratio: \n"); printeqstat(sim.chainm,2.0,MAXMT); printf (" Equilibrated maximum rotation of chain / acceptance ratio: \n"); printeqstat(sim.chainr,1.0,MAXMT); printf ("\n"); printf ("Further equilibration of configuration: %ld sweeps\n", sim.nequil/2); fflush (stdout); outfile = fopen("config.eq", "w"); fprintf (outfile, "%15.8le %15.8le %15.8le\n", conf.box.x, conf.box.y, conf.box.z); draw (outfile, &conf, &topo); fclose (outfile); printf (" Equilibrated configuration written to config.eq\n"); printf (" Box dimensions: %.10lf, %.10lf, %.10lf\n\n", conf.box.x, conf.box.y, conf.box.z); } printf ("Production run: %ld sweeps\n\n", sim.nsweeps); fflush (stdout); simulate(sim.nsweeps, 0, sim.paramfrq, 
sim.report, intfce, &topo, &sim, &conf,&files);

#ifdef MPI
    printf (" MPI replica changeT / changeP / acceptance ratio: \t %.6lf / %.6lf / %.6lf\n\n", sim.mpiexch.mx,sim.mpiexch.angle,RATIO(sim.mpiexch));
#endif

    /* Write the final configuration. */
    outfile = fopen(files.configurationoutfile, "w");
#ifdef TESTING
    fprintf (outfile, "%15.6le %15.6le %15.6le\n", conf.box.x, conf.box.y, conf.box.z);
#else
    fprintf (outfile, "%15.8le %15.8le %15.8le\n", conf.box.x, conf.box.y, conf.box.z);
#endif
    draw (outfile, &conf, &topo);
    fclose (outfile);

    // For testing the pairlist
    //gen_pairlist(&topo, &sim, &conf);
    //FILE * fpairlist;
    //fpairlist = fopen("pairlist.dat", "w");
    //print_pairlist(fpairlist, &sim, &topo);
    //fclose(fpairlist);
    //printf("sqmaxcut = %lf\n", topo.sqmaxcut);

    //// For testing the cluster algorithm
    //gen_clusterlist(&topo, &sim, &conf);
    //print_clusterlist(stdout, TRUE, &topo, &sim, &conf);
    //sort_clusterlist(&topo, &sim);
    //print_clusters(stdout, TRUE, &sim);
    //print_clusterstat(stdout, TRUE, &sim);

    if (memorydealloc(&conf, &topo, &sim))
        exit(1);
#ifdef MPI
    MPI_Finalize();
#endif
    printf ("\nDone\n\n");

    return 0;
}

/*..............................................................................*/
/*.........................SIMULATION RUN.......................................*/
/*..............................................................................*/

/* Run nsweeps Monte Carlo sweeps over the whole system.
   nsweeps  - number of sweeps to perform
   adjust   - sweeps between step-size adjustments (0 = never adjust)
   paramfrq - sweeps between order parameter samples (0 = never)
   report   - sweeps between statistics reports (report < nsweeps enables
              the energy/stat files)
   intfce   - pair interaction function table indexed by particle types */
void simulate(long nsweeps, long adjust, long paramfrq, long report,
              double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo,
              struct sim * sim, struct conf * conf, struct filenames *files)
{
    long i,j,wli;
    long next_adjust;   /* Next sweep number for step size adjustment */
    long next_calc;     /* Next sweep number for order parameter calculation */
    long next_dump;     /* Next sweep number for reporting statistics */
    long next_frame;    /* Next sweep number for dumping a movie frame */
    long step;          /* Step number within a given sweep */
    long sweep;         /* Current sweep number */
    //struct stat nem;  /* Nematic order parameter */
    //struct stat vol;  /* Volume statistics */
    //struct stat shapex, shapey, shapez;  /* Box shape statistics */
    //struct stat smec[MAXF];  /* Smectic order parameters (Fourier coefficients) */
    FILE *mf;                      /* Handle for movie file */
    FILE *cl_stat, *cl, *cl_list;  /* Handle for cluster statistics */
    FILE *ef, *statf;              /* Handle for energy file and statistical file */
    double edriftstart;            /* Energy drift calculation - start */
    double edriftchanges;          /* Energy drift calculation - accumulate all changes through moves */
    double edriftend;              /* Energy drift calculation - end */
    double pvdriftstart;           /* PV drift calculation - start */
    double pvdriftend;             /* PV drift calculation - end */
    double volume;                 /* volume of box */
    double moveprobab;             /* random number selecting the move */

    /* function declarations */
    //double nematic(long, struct particles *);
    double ran2(long *);
    //double smectic(long, struct particles *, long);
    double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *), int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn);
    void accumulate(struct stat *, double);
    void draw(FILE *, struct conf * conf, struct topo * topo);
    void optimizestep(struct disp *, double, double);
    void optimizerot(struct disp *, double, double);
    void partvecinit(struct topo * topo, struct sim * sim, struct conf * conf );
    int wlinit(struct wls *, char filename[30]);
    int wlwrite(struct wls *, char filename[30]);
    int wlend(struct wls *);
    int mesh_init(struct meshs *, double, long, struct conf * conf, struct sim * sim);
    int mesh_end(struct meshs *);
    long z_order(struct wls *, struct conf * conf,int);
    long twopartdist(struct wls *, struct conf * conf,int);
    void mesh_print (struct meshs *);
    void masscenter(long, struct ia_param [MAXT][MAXT], struct conf * conf);
    void gen_pairlist(struct topo * topo, struct sim * sim, struct conf * conf);
    int write_cluster(FILE * cl_stat, FILE * cl, FILE * cl_list, BOOL decor, long sweep, struct sim * sim, struct topo * topo, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *));
    double particlemove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *));
    double chainmove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *));
    double switchtypemove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *));
    double pressuremove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *));
    double replicaexchangemove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *), long sweep);
    long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *);
    long radiushole_position(double, struct sim *,int);
    long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli);
    double alignment_order(struct conf * conf, struct topo * topo);

    /* Opening files for cluster statistics */
    cl_stat = cl = cl_list = ef = statf = NULL;
    if(sim->write_cluster){
        // Empty file
        cl_stat = fopen(files->clusterstatfile, "w");
        fclose(cl_stat);
        cl_stat = fopen(files->clusterstatfile, "a");
        // Empty file
        cl = fopen(files->clusterfile, "w");
        fclose(cl);
        cl = fopen(files->clusterfile, "a");
    }
    /* write energy*/
    if (report < nsweeps){
        // Empty file
        ef = fopen(files->energyfile, "w");
        fclose(ef);
        ef = fopen(files->energyfile, "a");
        fprintf (ef, "# sweep energy\n");
        statf = fopen(files->statfile, "w");
        fclose(statf);
        statf = fopen(files->statfile, "a");
        fprintf (statf, "# sweep volume\n");
    }

    /*=== Initialise counters etc. ===*/
    // double pvolume;  /* Volume of all particles*/
    /* pvolume =0.0;
    for (i=0;i < topo->npart;i++) {
        if (conf->particle[i].type>=0 )
            pvolume += topo->ia_params[conf->particle[i].type][conf->particle[i].type].volume;
    }*/
    sim->shprob = sim->shave/(double)topo->npart;
    /* Reset acceptance statistics for all move types. */
    for (i=0;i<MAXT;i++){
        sim->rot[i].acc = 0;
        sim->rot[i].rej = 0;
        sim->rot[i].oldrmsd = 0;
        sim->rot[i].oldmx = 0;
        sim->trans[i].acc = 0;
        sim->trans[i].rej = 0;
        sim->trans[i].oldrmsd = 0;
        sim->trans[i].oldmx = 0;
    }
    for (i=0;i<MAXMT;i++){
        sim->chainm[i].acc = 0;
        sim->chainm[i].rej = 0;
        sim->chainm[i].oldrmsd = 0;
        sim->chainm[i].oldmx = 0;
        sim->chainr[i].acc = 0;
        sim->chainr[i].rej = 0;
        sim->chainr[i].oldrmsd = 0;
        sim->chainr[i].oldmx = 0;
    }
    //(*edge).acc = (*edge).rej = (*edge).oldrmsd = (*edge).oldmx = 0;
    sim->edge.acc = sim->edge.rej = sim->edge.oldrmsd = sim->edge.oldmx = 0;
    sim->mpiexch.acc = sim->mpiexch.rej = sim->mpiexch.oldrmsd = sim->mpiexch.oldmx = 0;

    /*Initialize some values at begining*/
    partvecinit(topo,sim,conf);

    next_adjust = adjust;
    next_calc = paramfrq;
    next_dump = report;
    next_frame = sim->movie;
    //nem = vol = shapex = shapey = shapez = nullstat;
    //for (i=0; i<MAXF; i++) smec[i] = nullstat;

    if (sim->movie > 0) {
        mf = fopen(files->moviefile, "a");
    } else {
        mf = NULL;
    }

    /* Reset all Wang-Landau state before (re)initialization below. */
    sim->wl.wl_meshsize = 0;
    sim->wl.radiushole = NULL;
    sim->wl.radiusholeold = NULL;
    sim->wl.radiusholemax = 0;
    sim->wl.partincontactold = 0;
    sim->wl.partincontact = 0;
    sim->wl.wlmdim = 0;
    sim->wl.wlmdim = 0;  /* NOTE(review): duplicated assignment in original - harmless */
    sim->wl.length[0]=0;
    sim->wl.length[1]=0;
    sim->wl.currorder[0]=0;
    sim->wl.currorder[1]=0;
    sim->wl.neworder[0]=0;
    sim->wl.neworder[1]=0;
    sim->wl.weights = NULL;
    sim->wl.hist = NULL;
    masscenter(topo->npart,topo->ia_params, conf);

    /* Initialization of wang-landaou method:
       compute the starting value of each active order parameter (wlm code
       selects which quantity is biased) and validate it against the
       histogram range read by wlinit(). */
    if ( sim->wlm[0] >0 ) {
        if (wlinit(&sim->wl,files->wlinfile) != 0)
            return;
        sim->wl.wlmdim = 1 ;
        if ( sim->wlm[1] > 0 )
            sim->wl.wlmdim = 2 ;
        for (wli=0;wli<sim->wl.wlmdim;wli++) {
            switch (sim->wlm[wli]) {
                case 1:
                    masscenter(topo->npart,topo->ia_params, conf);
                    sim->wl.currorder[wli] = z_order(&sim->wl,conf,wli);
                    break;
                case 2:
                    sim->wl.wl_meshsize = (topo->ia_params[sim->wl.wlmtype][sim->wl.wlmtype].sigma) / 3.0; // TODO
                    sim->wl.mesh.data = NULL;
                    sim->wl.mesh.tmp = NULL;
                    sim->wl.origmesh.data = NULL;
                    sim->wl.origmesh.tmp = NULL;
                    sim->wl.currorder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize, topo->npart, conf, sim) - sim->wl.minorder[wli]);
                    break;
                case 3:
                    sim->wl.currorder[wli] = (long) floor( (conf->particle[0].dir.z - sim->wl.minorder[wli])/ sim->wl.dorder[wli] );
                    break;
                case 4:
                    sim->wl.currorder[wli] = twopartdist(&sim->wl,conf,wli);
                    break;
                case 5:
                    masscenter(topo->npart,topo->ia_params, conf);
                    sim->wl.radiusholemax = 0;
                    sim->wl.radiushole = NULL;
                    sim->wl.radiusholeold = NULL;
                    sim->wl.currorder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm));
                    break;
                case 6:
                    sim->wl.radiusholemax = 0;
                    sim->wl.radiushole = NULL;
                    sim->wl.radiusholeold = NULL;
                    sim->wl.currorder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
                    break;
                case 7:
                    sim->wl.currorder[wli] = contparticles_all(topo,conf,sim,wli);
                    break;
                default:
                    sim->wl.currorder[wli] = 0;
                    break;
            }
            if ( (sim->wl.currorder[wli] >= sim->wl.length[wli] ) || (sim->wl.currorder[wli] < 0) ) {
                printf("Error: starting Wang-Landau method with order parameter %f out of range(%f - %f)\n\n", sim->wl.dorder[wli]*sim->wl.currorder[wli] + \
                       sim->wl.minorder[wli], sim->wl.minorder[wli], sim->wl.minorder[wli]+sim->wl.dorder[wli]*sim->wl.length[wli] );
                wlend(&sim->wl);
                return;
            }
        }
        if (sim->wl.alpha < WL_ALPHATOL/100)
            sim->wl.alpha = WL_ZERO;
        fflush (stdout);
    }

    /*do moves - START OF REAL MC*/
    if(sim->pairlist_update){
        gen_pairlist(topo, sim, conf); // Does that solve the problem?
    }

    /*do energy drift check - start calculation*/
    volume = conf->box.x * conf->box.y * conf->box.z;
    edriftstart = calc_energy(0, intfce, 0, topo, conf, sim,0);
    pvdriftstart = sim->press * volume - (double)topo->npart * log(volume) / sim->temper;
    //printf("starting energy: %.15f \n",calc_energy(0, intfce, 0, topo, conf, sim,0));
    //printf("press: %.15f\n",sim->press * volume - (double)topo->npart * log(volume) / sim->temper);
    edriftchanges = 0.0;

    for (sweep=1; sweep <= nsweeps; sweep++) {
        // Try replica exchange
        if((sim->nrepchange) && (sweep % sim->nrepchange == 0)){
            edriftchanges += replicaexchangemove(topo,sim,conf,intfce,sweep);
        }
        // Generate the pairlist
        if((sim->pairlist_update) && (sweep % sim->pairlist_update == 0)){
            gen_pairlist(topo, sim, conf);
        }
        //normal moves: one attempt per particle per sweep, the move type is
        //selected by comparing a uniform random number against the cumulative
        //probabilities shprob / chainprob / switchprob.
        for (step=1; step <= topo->npart; step++) {
            moveprobab = ran2(&seed);
            if ( moveprobab < sim->shprob) {
                /* pressure moves*/
                edriftchanges += pressuremove(topo,sim,conf,intfce);
            } else {
                if (moveprobab < sim->shprob + sim->chainprob) {
                    /* chain moves (NOTE(review): original comment said "single particle moves") */
                    edriftchanges += chainmove(topo,sim,conf,intfce);
                } else if (moveprobab < sim->shprob + sim->chainprob + sim->switchprob){
                    /*=== This is an attempt to switch a type ===*/
                    edriftchanges += switchtypemove(topo,sim,conf,intfce);
                } else {
                    /* single particle moves*/
                    edriftchanges += particlemove(topo,sim,conf,intfce);
                } /* end of else next to chain moves */
            } /* end of else next to volume moves */
        }
        /**** End of step loop for this sweep ****/

        /*=== Start of end-of-sweep housekeeping ===*/

        /* Adjustment of maximum step sizes during equilibration */
        if (sweep == next_adjust) {
            for (i = 0; i < MAXT ;i++) {
                if ((sim->trans[i].acc > 0)||(sim->trans[i].rej >0))
                    optimizestep (sim->trans + i, 1.5, 0.0);
                if ((sim->rot[i].acc > 0)||(sim->rot[i].rej >0))
                    optimizerot (sim->rot + i, 5.0, 0.01);
            }
            for (i = 0; i < MAXMT; i++) {
                if ((sim->chainm[i].acc > 0)||(sim->chainm[i].rej > 0))
                    optimizestep (sim->chainm + i, 1.5, 0.0);
                if ((sim->chainr[i].acc > 0)||(sim->chainr[i].rej > 0))
                    optimizerot (sim->chainr + i, 5.0, 0.01);
            }
            optimizestep (&(sim->edge), 1.0, 0.0);
            next_adjust += adjust;
        }

        /* Wang-Landau maintenance every 1000 sweeps: find histogram min/max;
           if the histogram is flat enough, halve alpha and reset it. */
        if ( (sim->wlm[0] > 0) && (sim->wl.alpha > WL_ZERO) && !(sweep % 1000) ) {
            /* recalculate system CM to be sure there is no accumulation of errors by +- rejection moves*/
            /* BUG - not used any longer: caused problems with PBC
               normal moves systemCM movement can be calculated from CM movements of individual particles
               present center of mass calculation use pbc and thus particles that moved across the box
               is in this calculation used in pripary box but in other moves in in in the particles position
            if ( (sim->wlm[0] == 1) || (sim->wlm[1] == 1) )
                masscenter(topo->npart,topo->ia_params, conf);
            */
            sim->wl.min = sim->wl.hist[0];
            sim->wl.max = sim->wl.hist[0];
            for (i=0;i < sim->wl.length[0];i++) {
                j=0;
                if ( sim->wl.hist[i+j*sim->wl.length[0]] > sim->wl.max ) sim->wl.max = sim->wl.hist[i+j*sim->wl.length[0]];
                if ( sim->wl.hist[i+j*sim->wl.length[0]] < sim->wl.min ) sim->wl.min = sim->wl.hist[i+j*sim->wl.length[0]];
                for (j=1;j < sim->wl.length[1];j++) {
                    if ( sim->wl.hist[i+j*sim->wl.length[0]] > sim->wl.max ) sim->wl.max = sim->wl.hist[i+j*sim->wl.length[0]];
                    if ( sim->wl.hist[i+j*sim->wl.length[0]] < sim->wl.min ) sim->wl.min = sim->wl.hist[i+j*sim->wl.length[0]];
                }
            }
            if ( sim->wl.min > WL_MINHIST ) {
                if ( sim->temper * log(sim->wl.max/sim->wl.min) < WL_GERR ) {
                    /*DEBUG
                    for (i=1;i<wl.length;i++) {
                        printf (" %15.8le %15ld %15.8f\n",sim->wl.weights[i],sim->wl.hist[i],particle[0].pos.z);
                        fflush(stdout);
                    }
                    */
                    if ( sim->wl.alpha < WL_ALPHATOL)
                        break;
                    sim->wl.alpha/=2;
                    printf("%f \n", sim->wl.alpha);
                    fflush (stdout);
                    /* Reset the histogram; shift weights so their minimum is zero. */
                    sim->wl.wmin = sim->wl.weights[0];
                    for (i=0;i < sim->wl.length[0];i++) {
                        j=0;
                        sim->wl.hist[i+j*sim->wl.length[0]] = 0;
                        sim->wl.weights[i+j*sim->wl.length[0]] -= sim->wl.wmin;
                        for (j=1;j < sim->wl.length[1];j++) {
                            sim->wl.hist[i+j*sim->wl.length[0]] = 0;
                            sim->wl.weights[i+j*sim->wl.length[0]] -= sim->wl.wmin;
                        }
                    }
                }
            }
        }

        if (!(sweep % 10000)) {
            /*reinitialize patch vectors to avoid accumulation of errors*/
            partvecinit(topo,sim,conf);
        }

        /* Sampling of statistics */
        if (sweep == next_calc) {
            /*s2 = nematic(npart, particle);
            accumulate (&nem, s2);
            for (i=0; i<terms; i++) {
                ci = smectic(npart, particle, i+1);
                accumulate (&smec[i], ci);
            }
            accumulate (&shapex, (*box).x);
            accumulate (&shapey, (*box).y);
            accumulate (&shapez, (*box).z);
            volume = (*box).x * (*box).y * (*box).z;
            accumulate (&vol, volume);
            next_calc += paramfrq;
            */
        }

        /* Writing of statistics */
        if (sweep == next_dump) {
            /*printf ("Statistics after %ld sweeps:\n", sweep);
            printf (" Mean and RMS fluctuation of S2: %13.8lf %13.8lf\n", nem.mean, nem.rms);
            for (i=0; i<terms; i++) {
                printf (" Mean & fluc. Fourier coeff. %3ld: %13.8lf %13.8lf\n", i+1, smec[i].mean, smec[i].rms);
            }
            printf (" Mean & fluc box dimensions: x %13.8lf %13.8lf\n", shapex.mean, shapex.rms);
            printf (" y %13.8lf %13.8lf\n", shapey.mean, shapey.rms);
            printf (" z %13.8lf %13.8lf\n", shapez.mean, shapez.rms);
            printf (" Mean & fluctuation volume: %13.8lf %13.8lf\n", vol.mean, vol.rms);
            printf (" Mean & fluc. volume over volume of particles: %13.8lf %13.8lf\n", vol.mean/pvolume, vol.rms/pvolume);
            printf ("\n");
            fflush (stdout);
            */
            fprintf (statf, " %ld; %.10lf\n", sweep, conf->box.x * conf->box.y * conf->box.z);
            fprintf (ef, " %ld; %.10lf %f \n", sweep, calc_energy(0, intfce, 0, topo, conf, sim,0), alignment_order(conf,topo));
            if (sim->wlm[0] > 0) {
                wlwrite(&sim->wl,files->wloutfile);
            }
            next_dump += report;
        }

        /* Writing of movie frame */
        if (sweep == next_frame) {
            fprintf (mf, "%ld\n", topo->npart);
            fprintf (mf, "sweep %ld; box %.10lf %.10lf %.10lf\n", sweep, conf->box.x, conf->box.y, conf->box.z);
            draw (mf, conf, topo);
            fflush (mf);
            next_frame += sim->movie;
        }

        /* Writing out cluster statistics */
        if(sim->write_cluster && (sweep % sim->write_cluster == 0)){
            write_cluster(cl_stat, cl, cl_list, FALSE, sweep, sim, topo, conf, intfce);
        }
        /*=== End of housekeeping ===*/

    }
    /**** End of sweeps loop ****/

    /*do energy drift check - at the end calculation*/
    volume = conf->box.x * conf->box.y * conf->box.z;
    edriftend = calc_energy(0, intfce, 0, topo, conf, sim,0);
    pvdriftend = sim->press * volume - (double)topo->npart * log(volume) / sim->temper;
    printf("Energy drift: %.15lf \n",edriftend - edriftstart - edriftchanges +pvdriftend -pvdriftstart);
    printf("Starting energy+pv: %.8lf \n",edriftstart+pvdriftstart);
    printf("Starting energy: %.8lf \n",edriftstart);
    fflush(stdout);

    /* End wang-landau: normalize weights so their minimum is zero before writing. */
    if (sim->wlm[0] > 0) {
        sim->wl.min = sim->wl.hist[0];
        for (i=0;i < sim->wl.length[0];i++) {
            j=0;
            if ( sim->wl.hist[i+j*sim->wl.length[0]] < sim->wl.min ) sim->wl.min = sim->wl.hist[i+j*sim->wl.length[0]];
            for (j=1;j < sim->wl.length[1];j++) {
                if ( sim->wl.hist[i+j*sim->wl.length[0]] < sim->wl.min ) sim->wl.min = sim->wl.hist[i+j*sim->wl.length[0]];
            }
        }
        sim->wl.wmin = sim->wl.weights[0];
        for (i=0;i < sim->wl.length[0];i++) {
            j=0;
            sim->wl.weights[i+j*sim->wl.length[0]] -= sim->wl.wmin;
            for (j=1;j < sim->wl.length[1];j++) {
                sim->wl.weights[i+j*sim->wl.length[0]] -= sim->wl.wmin;
            }
        }
wlwrite(&sim->wl,files->wloutfile);
        wlend(&sim->wl);
        /* Free auxiliary WL structures depending on which order parameters were used. */
        if ( (sim->wlm[0] == 2)||(sim->wlm[1] == 2) ) {
            mesh_end(&sim->wl.mesh);
            mesh_end(&sim->wl.origmesh);
        }
        if ( (sim->wlm[0] == 5)||(sim->wlm[1] == 5)||(sim->wlm[0] == 6)||(sim->wlm[1] == 6) ) {
            if ( sim->wl.radiushole != NULL )
                free(sim->wl.radiushole);
            if ( sim->wl.radiusholeold != NULL )
                free(sim->wl.radiusholeold);
        }
    }

    /*end movie*/
    if (sim->movie > 0)
        fclose (mf);

    /*end cluster*/
    if(sim->write_cluster){
        fclose(cl_stat);
        fclose(cl);
    }
    if (report < nsweeps) {
        fclose(ef);
        fclose(statf);
    }
}

/*..................................MOVES.........................................*/
/*................................................................................*/
/*..............................PARTICLE MOVES....................................*/

/* Perform one single-particle Monte Carlo move (displacement or rotation)
   on a randomly chosen particle and return the resulting energy change. */
double particlemove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *))
{
    double edriftchanges =0.0;
    long target;
    double ran2(long *);
    double partdisplace(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *),long target);
    double partrotate(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *),long target);

    /*=== This is a particle move step ===*/
    target = ran2(&seed) * topo->npart;
    /* Displace with probability 1/2 (always for spheres, geotype >= SP, which
       cannot rotate) - except particle 0 when WL mode 3 is active (its
       orientation, particle[0].dir.z, is the WL order parameter per the WL
       init code), in which case only the rotation branch is taken. */
    if ( !( ((sim->wlm[0] == 3) || (sim->wlm[1] == 3) ) && (target == 0) ) && \
         ((ran2(&seed) < 0.5) || (topo->ia_params[conf->particle[target].type][conf->particle[target].type].geotype[0] >= SP)) ) {
        /* no rotation for spheres */
        //target = 1;
        //printf ("displacement\n\n");
        edriftchanges = partdisplace(topo,sim,conf,intfce,target);
    } else {
        /*=== Rotation step ===*/
        edriftchanges = partrotate(topo,sim,conf,intfce,target);
    }
    /*=== End particle move step ===*/
    return edriftchanges;
}

/*................................................................................*/
double partdisplace(struct topo * topo, struct sim * sim, struct conf *
conf, double (* intfce[MAXT][MAXT])(struct interacts *),long target)
{
    /* Single-particle translation trial move (Metropolis, optionally biased
       by Wang-Landau weights).  Returns the resulting energy change for the
       drift bookkeeping; 0.0 when the move is rejected. */
    double edriftchanges,energy,enermove,wlener;
    struct vector orig, dr, origsyscm;
    int reject=0,wli;
    double radiusholemax_orig=0;
    /* local prototypes */
    double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *), int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn);
    int movetry(double, double, double);
    void wlreject(struct sim *, long);
    void wlaccept(int, struct wls *);
    long meshorder_moveone(struct vector, struct vector, struct meshs *, long, long, \
        struct conf * conf, struct sim * sim, int wli);
    int mesh_cpy(struct meshs *, struct meshs *);
    //void mesh_print (struct meshs *);
    long z_order(struct wls *, struct conf * conf, int wli);
    long twopartdist(struct wls *, struct conf *conf, int wli);
    struct vector ranvec(void);
    int longarray_cpy (long **, long **, long, long);
    long radiusholeorder_moveone(struct vector *oldpos, struct conf *conf, struct sim * sim, long target, int wli,struct vector *);
    long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *);
    long contparticles_moveone(struct vector *oldpos, struct conf *conf, struct sim * sim, long target,int wli);
    long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli);

    /*=== Displacement step ===*/
    edriftchanges =0.0;
    origsyscm.x = 0;
    origsyscm.y = 0;
    origsyscm.z = 0;
    /* energy of the target particle in the old configuration */
    energy = calc_energy(target, intfce, 1, topo, conf, sim,0);
    orig = conf->particle[target].pos;
    dr = ranvec();
    //ran = sqrt(ran2(&seed));
    /* scale the random unit vector by the per-type maximum displacement,
       expressed in box-relative coordinates */
    dr.x *= sim->trans[conf->particle[target].type].mx/conf->box.x;
    dr.y *= sim->trans[conf->particle[target].type].mx/conf->box.y;
    dr.z *= sim->trans[conf->particle[target].type].mx/conf->box.z;
    if ( ((sim->wlm[0] == 3)||(sim->wlm[1] == 3)) && (target == 0) ) {
        /* particle 0 is the wlm==3 reference particle: keep it fixed */
        dr.z = 0;
        dr.y = 0;
        dr.x = 0;
    }
    conf->particle[target].pos.x += dr.x;
    conf->particle[target].pos.y += dr.y;
    conf->particle[target].pos.z += dr.z;
    //} while (conf->particle[target].pos.x < 0.25 || conf->particle[target].pos.x > 0.50);

    reject = 0;
    wlener = 0.0;
    if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */
        for (wli=0;wli<sim->wl.wlmdim;wli++) {
            switch (sim->wlm[wli]) {
                case 1:
                    /* z of system centre of mass: update syscm incrementally
                       by the volume-weighted shift of the moved particle */
                    origsyscm = conf->syscm;
                    conf->syscm.x += dr.x * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume;
                    conf->syscm.y += dr.y * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume;
                    conf->syscm.z += dr.z * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume;
                    sim->wl.neworder[wli] = z_order(&sim->wl, conf,wli);
                    break;
                case 2:
                    /* pore mesh: back up, then incremental single-particle update */
                    mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh);
                    sim->wl.neworder[wli] = meshorder_moveone(orig, conf->particle[target].pos, &sim->wl.mesh, topo->npart, target, conf, sim,wli);
                    break;
                case 4:
                    sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli);
                    break;
                case 5:
                    /* radius of hole around the system CM: full recompute */
                    radiusholemax_orig = sim->wl.radiusholemax;
                    origsyscm = conf->syscm;
                    conf->syscm.x += dr.x * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume;
                    conf->syscm.y += dr.y * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume;
                    conf->syscm.z += dr.z * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume;
                    longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
                    sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm));
                    break;
                case 6:
                    /* radius of hole around particle 0: full recompute if the
                       reference particle itself moved, incremental otherwise */
                    radiusholemax_orig = sim->wl.radiusholemax;
                    longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
                    if ( target == 0 )
                        sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
                    else
                        sim->wl.neworder[wli] = radiusholeorder_moveone(&orig, conf, sim,target,wli,&(conf->particle[0].pos));
                    break;
                case 7:
                    /* number of particles in contact; same full/incremental split */
                    sim->wl.partincontactold = sim->wl.partincontact;
                    if ( target == 0 )
                        sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli);
                    else
                        sim->wl.neworder[wli] = contparticles_moveone(&orig,conf,sim,target,wli);
                    break;
                default:
                    sim->wl.neworder[wli] = sim->wl.currorder[wli];
                    break;
            }
            /* order parameter outside the sampled window => reject outright */
            if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1;
        }
        if (!reject) {
            /* WL bias: weight difference between new and current 2D bins */
            wlener += sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]];
            energy += wlener;
        }
    }
    if (!reject) { /* wang-landaou ok, try move - calcualte energy */
        enermove = calc_energy(target, intfce, 1, topo, conf, sim,0);
    }
    /* note: || short-circuit keeps movetry from reading the (then
       uninitialised) enermove when reject is already set */
    if ( reject || movetry(energy, enermove, sim->temper) ) { /* probability acceptance */
        conf->particle[target].pos = orig;
        sim->trans[conf->particle[target].type].rej++;
        if ( (sim->wlm[0] == 1) || (sim->wlm[0] == 5) || (sim->wlm[1] == 1) || (sim->wlm[1] == 5) )
            conf->syscm = origsyscm;
        wlreject(sim,radiusholemax_orig);
    } else { /* move was accepted */
        sim->trans[conf->particle[target].type].acc++;
        wlaccept(sim->wlm[0],&sim->wl);
        edriftchanges = enermove - energy + wlener;
        //printf("%lf\t%lf\n", conf->particle[0].pos.z * conf->box.z , enermove);
        //printf("%.12f\t%.12f\t%.12f\n", energy , enermove,edriftchanges);
    }
    return edriftchanges;
}

/*................................................................................*/

/* Single-particle rotation trial move; analogous to partdisplace, but of the
   WL methods only wlm==3 (z-orientation of particle 0) can change its order
   parameter under a pure rotation. */
double partrotate(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *),long target)
{
    double edriftchanges,energy,enermove,wlener;
    struct particles origpart;
    int reject=0,wli;
    double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *), int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn);
    int movetry(double, double, double);
    void wlreject(struct sim *,long);
    void wlaccept(int, struct wls *);
    void normalise(struct vector *);
    void ortogonalise(struct vector *,struct vector);
    void psc_rotate(struct particles *,double,int);

    /*=== Rotation step ===*/
    //printf ("rotation %ld npart %ld\n\n",target,npart);
    energy = calc_energy(target, intfce, 1, topo, conf, sim,0);
    /* full particle copy so direction AND patch vectors can be restored */
    origpart = conf->particle[target];
    psc_rotate(&conf->particle[target],sim->rot[conf->particle[target].type].angle, topo->ia_params[conf->particle[target].type][conf->particle[target].type].geotype[0]);
    /*should be normalised and ortogonal but we do for safety*/
    normalise (&conf->particle[target].dir);
    ortogonalise(&conf->particle[target].patchdir[0],conf->particle[target].dir);
    reject = 0;
    edriftchanges =0.0;
    wlener = 0.0;
    if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */
        for (wli=0;wli<sim->wl.wlmdim;wli++) {
            switch (sim->wlm[wli]) {
                case 3:
                    /* bin index of particle 0's z-orientation */
                    if (target == 0)
                        sim->wl.neworder[wli] = (long) floor( (conf->particle[0].dir.z - sim->wl.minorder[wli])/ sim->wl.dorder[wli] );
                    else
                        sim->wl.neworder[wli] = sim->wl.currorder[wli];
                    /* only rotation change direction */
                    break;
                default:
                    sim->wl.neworder[wli] = sim->wl.currorder[wli];
                    break;
            }
            if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1;
        }
        if (!reject) {
            wlener += sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]];
            energy += wlener;
        }
    }
    if (!reject) { /* wang-landaou ok, try move - calcualte energy */
        enermove = calc_energy(target, intfce, 1, topo, conf, sim,0);
    }
    if ( reject || movetry(energy,enermove,sim->temper) ) { /* probability acceptance */
        conf->particle[target] = origpart;
        sim->rot[conf->particle[target].type].rej++;
        wlreject(sim,sim->wl.radiusholemax);
    } else { /* move was accepted */
        // DEBUG
        //fprintf(fenergy, "%lf\t%lf\n", conf->particle[1].pos.x * conf->box.x , enermove);
        sim->rot[conf->particle[target].type].acc++;
        wlaccept(sim->wlm[0],&sim->wl);
        edriftchanges = enermove - energy + wlener;
        //printf("%lf\t%lf\n", conf->particle[0].patchdir[0].z, enermove);
    }
    return edriftchanges;
}
/*..................... This is an attempt to switch a type.................................*/

/* Type-switch trial move (semi-grand style): pick a random switchable
   particle, swap its type with its switchtype, and Metropolis-test the
   change including the chemical-potential term delta_mu * pmone.
   Rolls the swap back on rejection. */
double switchtypemove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *) )
{
    double edriftchanges,energy,enermove,wlener;
    int reject=0,wli;
    long target;
    double radiusholemax_orig=0;
    /* local prototypes */
    double ran2(long *);
    double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *), int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn);
    int movetry(double, double, double);
    void wlreject(struct sim *,long);
    void wlaccept(int, struct wls *);
    void int_partvec(long, struct ia_param *, struct conf *);
    int mesh_init(struct meshs *, double, long, struct conf * conf, struct sim * sim);
    int mesh_cpy(struct meshs *, struct meshs *);
    long z_order(struct wls *, struct conf * conf,int);
    long twopartdist(struct wls *, struct conf *conf,int);
    long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *);
    int longarray_cpy (long **target, long **source,long,long);
    long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli);

    /*=== This is an attempt to switch a type ===*/
    edriftchanges =0.0;
    wlener = 0.0;
    /* pick a random particle from the switchable subset */
    target = ran2(&seed) * topo->n_switch_part;
    target = topo->switchlist[target];
    DEBUG_SIM("Switching the particle type");
    DEBUG_SIM("PARTICLE: %ld", target);
    energy = calc_energy(target, intfce, 1, topo, conf, sim,0);
    // Start switching the type
    int switched = conf->particle[target].switched;
    int pmone = PMONE(switched);
    DEBUG_SIM("switched = %d", switched);
    DEBUG_SIM("pmone = %d", pmone);
    int tmp_type = conf->particle[target].type;
    conf->particle[target].type = conf->particle[target].switchtype;
    conf->particle[target].switchtype = tmp_type;
    conf->particle[target].switched += pmone;
    /* refresh the cached per-particle interaction vectors for the new type */
    int_partvec(target,&(topo->ia_params[conf->particle[target].type][conf->particle[target].type]),conf);
    DEBUG_SIM("Particle %ld is %d switched", target, switched);
    //DEBUG
#ifdef DEBUGGING_SIM
    if ((abs(pmone) != 1) || (conf->particle[target].type == conf->particle[target].switchtype)){
        fprintf(stderr, "ERROR: Something went wrong, when switching the type of particle %ld\n", target);
        exit(1);
    }
#endif
    if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */
        for (wli=0;wli<sim->wl.wlmdim;wli++) {
            switch (sim->wlm[wli]) {
                /*case 1: sim->wl.neworder = z_order(&sim->wl, conf,wli); break;*/
                case 2:
                    /* type switch has no cheap incremental path: full mesh rebuild */
                    mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh);
                    sim->wl.neworder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize,topo->npart, conf, sim) - sim->wl.minorder[wli]);
                    break;
                /*case 4: sim->wl.neworder = twopartdist(&sim->wl,conf,wli); break;*/
                case 5:
                    radiusholemax_orig = sim->wl.radiusholemax;
                    longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
                    sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm));
                    break;
                case 6:
                    radiusholemax_orig = sim->wl.radiusholemax;
                    longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
                    sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
                    break;
                case 7:
                    sim->wl.partincontactold = sim->wl.partincontact;
                    sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli);
                    break;
                default:
                    sim->wl.neworder[wli] = sim->wl.currorder[wli];
                    break;
            }
            if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1;
        }
        if (!reject) {
            wlener += sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]];
            energy += wlener;
        }
    }
    if (!reject) {
        /* chemical-potential contribution, signed by switch direction */
        enermove = conf->particle[target].delta_mu * pmone;
        // DEBUG
        //double dmu = enermove;
        //particle[target].switched += pmone;
        enermove += calc_energy( target, intfce, 1, topo, conf, sim,0);
        //printf("energy: %lf \t %lf\t%lf\n",particle[target].delta_mu, dmu, enermove);
    }
    // If not accepted: switch back
    if ( reject || movetry(energy,enermove,sim->temper) ) { /* probability acceptance */
        DEBUG_SIM("Did NOT switch it\n");
        conf->particle[target].switchtype = conf->particle[target].type;
        conf->particle[target].type = tmp_type;
        conf->particle[target].switched -= pmone;
        int_partvec(target,&(topo->ia_params[conf->particle[target].type][conf->particle[target].type]),conf);
        wlreject(sim,radiusholemax_orig);
    } else { /* move was accepted */
        wlaccept(sim->wlm[0],&sim->wl);
        edriftchanges = enermove - energy + wlener;
    }
    return edriftchanges;
}

/*.................................CHAIN MOVES....................................*/
/*................................................................................*/

/* Perform one chain/cluster MC move on a randomly chosen chain: 50/50
   displacement or rotation.  Returns the energy change for drift bookkeeping. */
double chainmove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *))
{
    double edriftchanges =0.0;
    long target;
    double ran2(long *);
    double chaindisplace(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *), long target);
    double chainrotate(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *), long target);

    /*=== This is a chain move step ===*/
    target = ran2(&seed) * topo->chainnum;
    if (ran2(&seed) < 0.5) {
        /*=== Displacement step of cluster/chain ===*/
        edriftchanges = chaindisplace(topo,sim,conf,intfce,target);
    } else {
        /*=== Rotation step of cluster/chain ===*/
        edriftchanges = chainrotate(topo,sim,conf,intfce,target);
    }
    /* ==== END OF CHAIN MOVES ===== */
    return edriftchanges;
}

/*................................................................................*/

double chaindisplace(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *), long target)
{
    /* Rigid displacement of a whole chain/cluster (Wang-Landau aware). */
    double edriftchanges,energy,enermove,wlener;
    struct vector dr, origsyscm;
    int reject=0,wli;
    struct vector cluscm;
    long current,i;
    struct particles chorig[MAXCHL];
    double radiusholemax_orig=0;
    double calc_energy(long
target, double (* intfce[MAXT][MAXT])(struct interacts *), int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn);
    int movetry(double, double, double);
    void wlreject(struct sim *,long);
    void wlaccept(int, struct wls *);
    long meshorder_movechain(long [MAXN], struct meshs *, long, struct conf * conf, \
        struct sim * sim, struct particles chorig[MAXCHL],int);
    int mesh_cpy(struct meshs *, struct meshs *);
    //void mesh_print (struct meshs *);
    long z_order(struct wls *, struct conf * conf,int);
    long twopartdist(struct wls *, struct conf *conf,int);
    struct vector ranvec(void);
    int longarray_cpy (long **target, long **source,long,long);
    long radiusholeorder_movechain(long chain[MAXN], struct conf * conf, \
        struct sim * sim,struct particles chorig[MAXCHL],int,struct vector *);
    long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *);
    long contparticles_movechain(long chain[MAXN], struct conf * conf, struct sim * sim,struct particles chorig[MAXCHL],int wli);
    long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli);

    /*=== Displacement step of cluster/chain ===*/
    //printf ("move chain\n\n");
    energy =0.0;
    wlener = 0.0;
    edriftchanges=0.0;
    i=0;
    current = topo->chainlist[target][0];
    cluscm.x = 0;
    cluscm.y = 0;
    cluscm.z = 0;
    origsyscm.x = 0;
    origsyscm.y = 0;
    origsyscm.z = 0;
    /* chainlist rows are terminated by a negative index: walk the chain,
       saving old positions and summing the old energy */
    while (current >=0 ) { /* store old configuration calculate energy*/
        chorig[i].pos = conf->particle[current].pos;
        energy += calc_energy(current, intfce, 2, topo, conf, sim, target);
        i++;
        current = topo->chainlist[target][i];
    }
    dr = ranvec();
    /* NOTE(review): particle[target] is indexed with a CHAIN index here (and
       in the acc/rej counters below) -- presumably relies on chain and
       first-particle numbering coinciding; verify against topology setup */
    dr.x *= sim->chainm[conf->particle[target].chaint].mx/conf->box.x;
    dr.y *= sim->chainm[conf->particle[target].chaint].mx/conf->box.y;
    dr.z *= sim->chainm[conf->particle[target].chaint].mx/conf->box.z;
    i=0;
    if ( ((sim->wlm[0] == 3)||(sim->wlm[1] == 3)) && (target == 0) ) {
        /* chain 0 holds the wlm==3 reference particle: keep it fixed */
        dr.z = 0;
        dr.y = 0;
        dr.x = 0;
    }
    current = topo->chainlist[target][0];
    while (current >=0 ) { /* move chaine to new position */
        if ( (sim->wlm[0] == 1) || (sim->wlm[0] == 5) || (sim->wlm[1] == 1) || (sim->wlm[1] == 5) ) { /* calculate move of center of mass */
            cluscm.x += dr.x*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
            cluscm.y += dr.y*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
            cluscm.z += dr.z*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
        }
        conf->particle[current].pos.x += dr.x;
        conf->particle[current].pos.y += dr.y;
        conf->particle[current].pos.z += dr.z;
        i++;
        current = topo->chainlist[target][i];
    }
    enermove = 0.0;
    reject = 0;
    if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */
        for (wli=0;wli<sim->wl.wlmdim;wli++) {
            switch (sim->wlm[wli]) {
                case 1:
                    /* shift system CM by the accumulated volume-weighted move */
                    origsyscm = conf->syscm;
                    conf->syscm.x += cluscm.x / conf->sysvolume;
                    conf->syscm.y += cluscm.y / conf->sysvolume;
                    conf->syscm.z += cluscm.z / conf->sysvolume;
                    sim->wl.neworder[wli] = z_order(&sim->wl, conf,wli);
                    break;
                case 2:
                    mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh);
                    sim->wl.neworder[wli] = meshorder_movechain(topo->chainlist[target], &sim->wl.mesh, topo->npart, conf, sim, chorig,wli);
                    break;
                case 4:
                    sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli);
                    break;
                case 5:
                    radiusholemax_orig = sim->wl.radiusholemax;
                    origsyscm = conf->syscm;
                    conf->syscm.x += cluscm.x / conf->sysvolume;
                    conf->syscm.y += cluscm.y / conf->sysvolume;
                    conf->syscm.z += cluscm.z / conf->sysvolume;
                    longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
                    sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm));
                    break;
                case 6:
                    radiusholemax_orig = sim->wl.radiusholemax;
                    longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
                    if ( target == 0 )
                        sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
                    else
                        sim->wl.neworder[wli] = radiusholeorder_movechain(topo->chainlist[target], conf, sim, chorig,wli,&(conf->particle[0].pos));
                    break;
                case 7:
                    sim->wl.partincontactold = sim->wl.partincontact;
                    if ( target == 0 )
                        sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli);
                    else
                        sim->wl.neworder[wli] = contparticles_movechain(topo->chainlist[target],conf,sim,chorig,wli);
                    break;
                default:
                    sim->wl.neworder[wli] = sim->wl.currorder[wli];
                    break;
            }
            if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1;
        }
        if (!reject) {
            wlener += sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]];
            energy += wlener;
        }
    }
    if (!reject) { /* wang-landaou ok, try move - calcualte energy */
        i=0;
        current = topo->chainlist[target][0];
        while (current >=0 ) {
            enermove += calc_energy(current, intfce, 2, topo, conf, sim,target);
            i++;
            current = topo->chainlist[target][i];
        }
    }
    if ( reject || movetry(energy, enermove, sim->temper) ) { /* probability acceptance */
        /* restore every particle position saved in chorig */
        i=0;
        current = topo->chainlist[target][0];
        while (current >=0 ) {
            conf->particle[current].pos = chorig[i].pos;
            i++;
            current = topo->chainlist[target][i];
        }
        sim->chainm[conf->particle[target].chaint].rej++;
        if ( (sim->wlm[0] == 1) || (sim->wlm[0] == 5) || (sim->wlm[1] == 1) || (sim->wlm[1] == 5) )
            conf->syscm = origsyscm;
        wlreject(sim,radiusholemax_orig);
    } else { /* move was accepted */
        sim->chainm[conf->particle[target].chaint].acc++;
        wlaccept(sim->wlm[0],&sim->wl);
        edriftchanges = enermove - energy + wlener;
    }
    return edriftchanges;
}

/*................................................................................*/

double chainrotate(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *), long target)
{
    /* Rigid rotation of a whole chain/cluster about its volume-weighted
       geometric centre (Wang-Landau aware). */
    double edriftchanges,energy,enermove,wlener;
    int reject=0,wli;
    struct vector cluscm;
    double chainvolume;
    long current, i;
    struct particles chorig[MAXCHL];
    double radiusholemax_orig=0;
    double calc_energy(long target, double (*
intfce[MAXT][MAXT])(struct interacts *), int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn);
    int movetry(double, double, double);
    void wlreject(struct sim *,long);
    void wlaccept(int, struct wls *);
    long meshorder_movechain(long [MAXN], struct meshs *, long, struct conf * conf, \
        struct sim * sim, struct particles chorig[MAXCHL],int);
    int mesh_cpy(struct meshs *, struct meshs *);
    void cluster_rotate(long, struct vector, double, struct topo * topo, struct conf * conf);
    long z_order(struct wls *, struct conf * conf,int);
    long twopartdist(struct wls *, struct conf *conf,int);
    int longarray_cpy (long **target, long **source,long,long);
    long radiusholeorder_movechain(long chain[MAXN], struct conf * conf, struct sim * sim,\
        struct particles chorig[MAXCHL],int,struct vector *);
    long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *);
    long contparticles_movechain(long chain[MAXN], struct conf * conf, struct sim * sim,struct particles chorig[MAXCHL],int wli);
    long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli);

    /*=== Rotation step of cluster/chain ===*/
    //printf ("rotation of chain\n\n");
    energy=0.0; /* set values to zero*/
    edriftchanges=0.0;
    wlener = 0.0;
    /* first chain member: seed the volume-weighted centre accumulation,
       save the old configuration and sum the old energy */
    current = topo->chainlist[target][0];
    cluscm.x = conf->particle[current].pos.x*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
    cluscm.y = conf->particle[current].pos.y*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
    cluscm.z = conf->particle[current].pos.z*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
    chorig[0] = conf->particle[current];
    chainvolume = topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
    energy += calc_energy(current, intfce, 2, topo, conf, sim,target);
    i=1;
    current = topo->chainlist[target][i];
    while (current >=0 ) { /* store old configuration calculate energy*/
        chorig[i] = conf->particle[current];
        /*We have chains whole! don't have to do PBC*/
        /*r_cm.x = conf->particle[current].pos.x - conf->particle[first].pos.x;
        r_cm.y = conf->particle[current].pos.y - conf->particle[first].pos.y;
        r_cm.z = conf->particle[current].pos.z - conf->particle[first].pos.z;
        if ( r_cm.x < 0 ) r_cm.x -= (double)( (long)(r_cm.x-0.5) );
        else r_cm.x -= (double)( (long)(r_cm.x+0.5) );
        if ( r_cm.y < 0 ) r_cm.y -= (double)( (long)(r_cm.y-0.5) );
        else r_cm.y -= (double)( (long)(r_cm.y+0.5) );
        if ( r_cm.z < 0 ) r_cm.z -= (double)( (long)(r_cm.z-0.5) );
        else r_cm.z -= (double)( (long)(r_cm.z+0.5) );
        */
        cluscm.x += conf->particle[current].pos.x*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
        cluscm.y += conf->particle[current].pos.y*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
        cluscm.z += conf->particle[current].pos.z*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
        chainvolume += topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
        energy += calc_energy(current, intfce, 2, topo, conf, sim,target);
        i++;
        current = topo->chainlist[target][i];
    }
    /* normalise the accumulated weighted sum into the actual centre */
    cluscm.x = cluscm.x/chainvolume;
    cluscm.y = cluscm.y/chainvolume;
    cluscm.z = cluscm.z/chainvolume;
    /*do actual rotations around geometrical center*/
    cluster_rotate(target, cluscm, sim->chainr[conf->particle[target].chaint].angle, topo, conf);
    enermove=0.0;
    reject = 0;
    if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */
        for (wli=0;wli<sim->wl.wlmdim;wli++) {
            switch (sim->wlm[wli]) {
                case 1:
                    if (target == 0)
                        sim->wl.neworder[wli] = z_order(&sim->wl, conf,wli);
                    else
                        sim->wl.neworder[wli] = sim->wl.currorder[wli];
                    /* if we rotated cluster it is around its CM so no change*/
                    break;
                case 2:
                    mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh);
                    sim->wl.neworder[wli] = meshorder_movechain(topo->chainlist[target], &sim->wl.mesh, topo->npart, conf, sim, chorig,wli);
                    break;
                case 3:
                    if (target == 0)
                        sim->wl.neworder[wli] = (long) floor( (conf->particle[0].dir.z - sim->wl.minorder[wli])/ sim->wl.dorder[wli] );
                    else
                        sim->wl.neworder[wli] = sim->wl.currorder[wli];
                    /* only rotation change direction */
                    break;
                case 4:
                    sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli);
                    break;
                case 5:
                    radiusholemax_orig = sim->wl.radiusholemax;
                    longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
                    sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm));
                    break;
                case 6:
                    radiusholemax_orig = sim->wl.radiusholemax;
                    longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
                    if ( target == 0 )
                        sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
                    else
                        sim->wl.neworder[wli] = radiusholeorder_movechain(topo->chainlist[target], conf, sim, chorig,wli,&(conf->particle[0].pos));
                    break;
                case 7:
                    sim->wl.partincontactold = sim->wl.partincontact;
                    if ( target == 0 )
                        sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli);
                    else
                        sim->wl.neworder[wli] = contparticles_movechain(topo->chainlist[target],conf,sim,chorig,wli);
                    break;
                default:
                    sim->wl.neworder[wli] = sim->wl.currorder[wli];
                    break;
            }
            if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1;
        }
        if (!reject) {
            wlener += sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]];
            energy += wlener;
        }
    }
    if (!reject) { /* wang-landaou ok, try move - calcualte energy */
        i=0;
        current = topo->chainlist[target][0];
        while (current >=0 ) {
            enermove += calc_energy(current, intfce, 2, topo, conf, sim,target);
            i++;
            current = topo->chainlist[target][i];
        }
    }
    if ( reject || movetry(energy, enermove, sim->temper) ) { /* probability acceptance */
        /* restore the full particle records (positions AND orientations) */
        i=0;
        current = topo->chainlist[target][0];
        while (current >=0 ) {
            conf->particle[current] = chorig[i];
            i++;
            current =
topo->chainlist[target][i];
        }
        sim->chainr[conf->particle[target].chaint].rej++;
        wlreject(sim,radiusholemax_orig);
    } else { /* move was accepted */
        sim->chainr[conf->particle[target].chaint].acc++;
        wlaccept(sim->wlm[0],&sim->wl);
        edriftchanges = enermove - energy + wlener;
    }
    return edriftchanges;
}

/*..............................PRESSURE MOVES....................................*/
/*................................................................................*/

/* Volume-change (NPT) trial move.  sim->ptype selects the coupling:
     0 = anisotropic (change one random box edge),
     1 = isotropic (all three edges change together),
     2 = isotropic in xy with z constant,
     3 = isotropic in xy with z adjusted to keep total volume fixed.
   Acceptance includes the P*dV work and the N*ln(V'/V)/T ideal-gas term
   where the volume actually changes.  Returns the energy change for the
   drift bookkeeping. */
double pressuremove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *))
{
    double edriftchanges,energy,enermove,wlener;
    int reject=0,wli;
    double old_side;   /* Box length before attempted change */
    double *side;      /* Box dimension to try changing */
    double psch;       /* Size of a box change during pressure */
    double pvol;       /* Size of a volume during pressure */
    double pvoln;      /* Size of a new volume during pressure */
    double rsave;      /* Saved random number */
    double area;
    double radiusholemax_orig=0;
    /* local prototypes */
    double ran2(long *);
    double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *), int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn);
    int movetry(double, double, double);
    void wlreject(struct sim *,long);
    void wlaccept(int, struct wls *);
    int mesh_init(struct meshs *, double, long, struct conf * conf, struct sim * sim);
    int mesh_cpy(struct meshs *, struct meshs *);
    long z_order(struct wls *, struct conf * conf,int);
    long twopartdist(struct wls *, struct conf *conf,int);
    long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *);
    int longarray_cpy (long **target, long **source,long,long);
    long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli);

    /*=== This is a volume change step ===*/
    /*calculate energy*/
    edriftchanges=0.0;
    wlener = 0.0;
    /* total system energy of the old configuration */
    energy = calc_energy(0, intfce, 0, topo, conf, sim,0);
    /* Choose an edge */
    switch (sim->ptype) {
        case 0: /* Anisotropic pressure coupling */
            /* pick one of the three box edges with equal probability */
            rsave = ran2(&seed);
            if (rsave < 1.0/3.0) {
                side = &(conf->box.x);
                area = conf->box.y * conf->box.z;
            } else if (rsave < 2.0/3.0) {
                side = &(conf->box.y);
                area = conf->box.x * conf->box.z;
            } else {
                side = &(conf->box.z);
                area = conf->box.x * conf->box.y;
            }
            old_side = *side;
            *side += sim->edge.mx * (ran2(&seed) - 0.5);
            reject = 0;
            if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */
                for (wli=0;wli<sim->wl.wlmdim;wli++) {
                    switch (sim->wlm[wli]) {
                        case 1:
                            sim->wl.neworder[wli] = z_order(&sim->wl, conf,wli);
                            break;
                        case 2:
                            /* box change invalidates the mesh: full rebuild */
                            mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh);
                            sim->wl.neworder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize, topo->npart, conf, sim) - sim->wl.minorder[wli]);
                            break;
                        case 4:
                            sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli);
                            break;
                        case 5:
                            radiusholemax_orig = sim->wl.radiusholemax;
                            longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
                            sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm));
                            break;
                        case 6:
                            radiusholemax_orig = sim->wl.radiusholemax;
                            longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
                            sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
                            break;
                        case 7:
                            sim->wl.partincontactold = sim->wl.partincontact;
                            sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli);
                            break;
                        default:
                            sim->wl.neworder[wli] = sim->wl.currorder[wli];
                            break;
                    }
                    if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1;
                }
                if (!reject) {
                    wlener = sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]];
                    energy += wlener;
                }
            }
            if (!reject) { /* wang-landaou ok, try move - calculate energy */
                /* PV work for one edge + ideal-gas volume-entropy term */
                enermove = sim->press * area * (*side - old_side) - (double)topo->npart * log(*side/old_side) / sim->temper;
                enermove += calc_energy(0, intfce, 0, topo, conf, sim,0);
            }
            /* negative/zero edge is rejected outright (short-circuit keeps
               movetry from running in that case) */
            if ( reject || *side <= 0.0 || ( movetry(energy,enermove,sim->temper) ) ) { /* probability acceptance */
                *side = old_side;
                sim->edge.rej++;
                wlreject(sim,radiusholemax_orig);
            } else { /* move was accepted */
                sim->edge.acc++;
                wlaccept(sim->wlm[0],&sim->wl);
                edriftchanges = enermove - energy + wlener;
            }
            break;
        case 1: /* Isotropic pressure coupling */
            psch = sim->edge.mx * (ran2(&seed) - 0.5);
            pvol = conf->box.x * conf->box.y * conf->box.z;
            conf->box.x += psch;
            conf->box.y += psch;
            conf->box.z += psch;
            pvoln = conf->box.x * conf->box.y * conf->box.z;
            reject = 0;
            if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */
                for (wli=0;wli<sim->wl.wlmdim;wli++) {
                    switch (sim->wlm[wli]) {
                        case 1:
                            sim->wl.neworder[wli] = z_order(&sim->wl,conf,wli);
                            break;
                        case 2:
                            mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh);
                            sim->wl.neworder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize, topo->npart, conf, sim) - sim->wl.minorder[wli]);
                            break;
                        case 4:
                            sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli);
                            break;
                        case 5:
                            radiusholemax_orig = sim->wl.radiusholemax;
                            longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
                            sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm));
                            break;
                        case 6:
                            radiusholemax_orig = sim->wl.radiusholemax;
                            longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
                            sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
                            break;
                        case 7:
                            sim->wl.partincontactold = sim->wl.partincontact;
                            sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli);
                            break;
                        default:
                            sim->wl.neworder[wli] = sim->wl.currorder[wli];
                            break;
                    }
                    if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1;
                }
                if (!reject) {
                    wlener = sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]];
                    energy += wlener;
                }
            }
            if (!reject) { /* wang-landaou ok, try move - calcualte energy */
                enermove = sim->press * (pvoln - pvol) - (double)topo->npart * log(pvoln/pvol) / sim->temper;
                enermove += calc_energy(0, intfce, 0, topo, conf, sim,0);
            }
            if ( reject || movetry(energy,enermove,sim->temper) ) { /* probability acceptance */
                conf->box.x -= psch;
                conf->box.y -= psch;
                conf->box.z -= psch;
                sim->edge.rej++;
                wlreject(sim,radiusholemax_orig);
            } else { /* move was accepted */
                sim->edge.acc++;
                wlaccept(sim->wlm[0],&sim->wl);
                edriftchanges = enermove - energy + wlener;
            }
            break;
        case 2: /* Isotropic pressure coupling in xy, z constant */
            psch = sim->edge.mx * (ran2(&seed) - 0.5);
            /* pvol here is the xy AREA; the PV term below multiplies by box.z */
            pvol = conf->box.x * conf->box.y;
            conf->box.x += psch;
            conf->box.y += psch;
            pvoln = conf->box.x * conf->box.y;
            reject = 0;
            if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */
                for (wli=0;wli<sim->wl.wlmdim;wli++) {
                    switch (sim->wlm[wli]) {
                        /*no change in case 1, it does not change box.z*/
                        case 2:
                            mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh);
                            sim->wl.neworder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize,topo->npart, conf, sim) - sim->wl.minorder[wli]);
                            break;
                        case 4:
                            sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli);
                            break;
                        case 5:
                            radiusholemax_orig = sim->wl.radiusholemax;
                            longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
                            sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm));
                            break;
                        case 6:
                            radiusholemax_orig = sim->wl.radiusholemax;
                            longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
                            sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
                            break;
                        case 7:
                            sim->wl.partincontactold = sim->wl.partincontact;
                            sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli);
                            break;
                        default:
                            sim->wl.neworder[wli] = sim->wl.currorder[wli];
                            break;
                    }
                    if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1;
                }
                if (!reject) {
                    wlener = sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]];
                    energy += wlener;
                }
            }
            if (!reject) { /* wang-landaou ok, try move - calculate energy */
                enermove = sim->press * conf->box.z * (pvoln - pvol) - (double)topo->npart * log(pvoln/pvol) / sim->temper;
                enermove += calc_energy(0, intfce, 0, topo, conf, sim,0);
            }
            if ( reject || movetry(energy,enermove,sim->temper) ) { /* probability acceptance */
                conf->box.x -= psch;
                conf->box.y -= psch;
                sim->edge.rej++;
                wlreject(sim,radiusholemax_orig);
            } else { /* move was accepted */
                sim->edge.acc++;
                wlaccept(sim->wlm[0],&sim->wl);
                edriftchanges = enermove - energy + wlener;
            }
            break;
        case 3: /* Isotropic pressure coupling in xy, z coupled to have fixed volume */
            psch = sim->edge.mx * (ran2(&seed) - 0.5);
            pvol = conf->box.x * conf->box.y * conf->box.z;
            conf->box.x += psch;
            conf->box.y += psch;
            /* compensate z so the total volume pvol stays constant;
               no PV / ln(V) term is needed below for this reason */
            conf->box.z = pvol / conf->box.x / conf->box.y;
            reject = 0;
            if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */
                for (wli=0;wli<sim->wl.wlmdim;wli++) {
                    switch (sim->wlm[wli]) {
                        case 1:
                            sim->wl.neworder[wli] = z_order(&sim->wl, conf,wli);
                            break;
                        case 2:
                            mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh);
                            sim->wl.neworder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize,topo->npart, conf, sim) - sim->wl.minorder[wli]);
                            break;
                        case 4:
                            sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli);
                            break;
                        case 5:
                            radiusholemax_orig = sim->wl.radiusholemax;
                            longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
                            sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm));
                            break;
                        case 6:
                            radiusholemax_orig = sim->wl.radiusholemax;
                            longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
                            sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
                            break;
                        case 7:
                            sim->wl.partincontactold = sim->wl.partincontact;
                            sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli);
                            break;
                        default:
                            sim->wl.neworder[wli] = sim->wl.currorder[wli];
                            break;
                    }
                    if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1;
                }
                if (!reject) {
                    wlener = sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]];
                    energy += wlener;
                }
            }
            if (!reject) { /* wang-landaou ok, try move - calculate energy */
                enermove = calc_energy(0, intfce, 0, topo, conf, sim,0);
            }
            if ( reject || movetry(energy,enermove,sim->temper) ) { /* probability acceptance */
                conf->box.x -= psch;
                conf->box.y -= psch;
                conf->box.z = pvol / conf->box.x / conf->box.y;
                sim->edge.rej++;
                wlreject(sim,radiusholemax_orig);
            } else { /* move was accepted */
                sim->edge.acc++;
                wlaccept(sim->wlm[0],&sim->wl);
                edriftchanges = enermove - energy + wlener;
            }
            break;
        default:
            fprintf (stderr, "ERROR: unknown type of pressure coupling %d",sim->ptype);
            exit(1);
    }
    /*=== End volume change step ===*/
    return edriftchanges;
}

/*.....................
Switch replicas move in MPI ..............................*/ /*.................................................................................*/ double replicaexchangemove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *), long sweep ) { double edriftchanges=0.0; #ifdef MPI double change, *recwlweights; MPI_Status status; int oddoreven,count,wli,sizewl = 0; struct mpiexchangedata localmpi,receivedmpi; BOOL reject; long localwl,receivedwl; double ran2(long *); void gen_pairlist(struct topo * topo, struct sim * sim, struct conf * conf); int longarray_cpy (long **target, long **source,long,long); int mesh_init(struct meshs *, double, long, struct conf * conf, struct sim * sim); double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *), int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn); void wlaccept(int, struct wls *); //int mpi_newdatatypes(); //mpi_newdatatypes(); int i; struct vector vec; struct particles part; struct mpiexchangedata exch; MPI_Aint dispstart; MPI_Datatype MPI_vector; MPI_Datatype type[3] = {MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE}; int blocklen[3] = {1, 1, 1}; MPI_Aint disp[3]; MPI_Address( &vec, &dispstart); MPI_Address( &(vec.x), &disp[0]); MPI_Address( &(vec.y), &disp[1]); MPI_Address( &(vec.z), &disp[2]); for (i=0; i <3; i++) disp[i] -= dispstart; MPI_Type_struct( 3, blocklen, disp, type, &MPI_vector); MPI_Type_commit( &MPI_vector); MPI_Datatype MPI_Particle; MPI_Datatype type2[11] = {MPI_vector,MPI_vector,MPI_vector,MPI_vector,MPI_vector, MPI_LONG, MPI_LONG, MPI_INT,MPI_INT,MPI_DOUBLE, MPI_INT}; int blocklen2[11] = {1, 1, 2,4,2,1,1,1,1,1,1,}; MPI_Aint disp2[11]; MPI_Address( &part, &dispstart); MPI_Address( &(part.pos), &disp2[0]); MPI_Address( &(part.dir), &disp2[1]); MPI_Address( &(part.patchdir), &disp2[2]); MPI_Address( &(part.patchsides), &disp2[3]); MPI_Address( &(part.chdir), &disp2[4]); MPI_Address( &(part.chaint), &disp2[5]); 
MPI_Address( &(part.chainn), &disp2[6]); MPI_Address( &(part.type), &disp2[7]); MPI_Address( &(part.switchtype), &disp2[8]); MPI_Address( &(part.delta_mu), &disp2[9]); MPI_Address( &(part.switched), &disp2[10]); for (i=0; i <11; i++) disp2[i] -= dispstart; MPI_Type_struct( 11, blocklen2, disp2, type2, &MPI_Particle); MPI_Type_commit( &MPI_Particle); if (sim->wl.length[1] > 0) { sizewl = sim->wl.length[1] * sim->wl.length[0]; } else { sizewl = sim->wl.length[0]; } MPI_Datatype MPI_exchange; MPI_Datatype type3[7] = {MPI_vector, MPI_DOUBLE, MPI_DOUBLE, MPI_INT, MPI_vector, MPI_LONG, MPI_LONG}; int blocklen3[7] = {1, 1, 1, 1, 1, 1, 2}; MPI_Aint disp3[7]; MPI_Address( &exch, &dispstart); MPI_Address( &(exch.box), &disp3[0]); MPI_Address( &(exch.energy), &disp3[1]); MPI_Address( &(exch.volume), &disp3[2]); MPI_Address( &(exch.accepted), &disp3[3]); MPI_Address( &(exch.syscm), &disp3[4]); MPI_Address( &(exch.radiusholemax), &disp3[5]); MPI_Address( &(exch.wl_order), &disp3[6]); for (i=0; i <7; i++) disp3[i] -= dispstart; MPI_Type_struct(7, blocklen3, disp3, type3, &MPI_exchange); MPI_Type_commit( &MPI_exchange); /*=== This is an attempt to switch replicas ===*/ localmpi.box = conf->box; localmpi.energy = calc_energy(0, intfce, 0, topo, conf, sim,0); localmpi.volume = conf->box.x * conf->box.y * conf->box.z; localmpi.accepted = 0; localmpi.syscm = conf->syscm; localmpi.radiusholemax = sim->wl.radiusholemax; recwlweights = malloc( sizeof(double) * sizewl ); for (wli=0;wli<2;wli++) { localmpi.wl_order[wli] = 0; receivedmpi.wl_order[wli] = 0; } for (wli=0;wli<sim->wl.wlmdim;wli++) { localmpi.wl_order[wli] = sim->wl.currorder[wli]; //fprintf(stdout,"wli %d %ld %ld\n\n", wli, localmpi.wl_order[wli], sim->wl.currorder[wli] ); } if ( (sweep % (2*sim->nrepchange)) == 0) /* exchange odd ones with even ones*/ oddoreven=1; else /* exchange even ones with odd ones*/ oddoreven=0; if (sim->mpinprocs == 2) oddoreven=1; count = 1; if (sim->mpirank % 2 == oddoreven) { if (sim->mpirank > 0) 
{ MPI_Send(&localmpi, 1, MPI_exchange, sim->mpirank-1, count, MPI_COMM_WORLD); MPI_Send(sim->wl.weights, sizewl, MPI_DOUBLE, sim->mpirank-1, count, MPI_COMM_WORLD); //printf("send data: rank: %d energy: %f volume: %f pressure: %f \n",sim->mpirank,localmpi.energy,localmpi.volume,localmpi.pressure); MPI_Recv(&receivedmpi, 1, MPI_exchange, sim->mpirank-1, MPI_ANY_TAG, MPI_COMM_WORLD, &status); /*decision of accepting or rejecting the exchange was done on other process here we took received configuration (if move was accepted))*/ //printf("received data: rank: %d energy: %f volume: %f pressure: %f \n",sim->mpirank,receivedmpi.energy,receivedmpi.volume,receivedmpi.pressure); if (receivedmpi.accepted == 1) { sim->mpiexch.acc++; struct particles *temppart; temppart = malloc(topo->npart*sizeof(struct particles)); MPI_Recv(temppart, topo->npart, MPI_Particle, sim->mpirank-1, MPI_ANY_TAG, MPI_COMM_WORLD,&status); /* printf("received data: rank: %d\n", sim->mpirank); printf("part0 x %f y %f z %f\n",temppart[0].pos.x, temppart[0].pos.y, temppart[0].pos.z); printf("part1 x %f y %f z %f\n",temppart[1].pos.x, temppart[1].pos.y, temppart[1].pos.z); printf("part0 chaint %ld chainn %ld type %d\n",temppart[0].chaint,temppart[0].chainn,temppart[0].type); */ MPI_Send(conf->particle, topo->npart, MPI_Particle, sim->mpirank-1, count, MPI_COMM_WORLD); /* printf("send data: rank: %d\n",sim->mpirank); printf("part0 x %f y %f z %f\n",conf->particle[0].pos.x,conf->particle[0].pos.y,conf->particle[0].pos.z); printf("part1 x %f y %f z %f\n",conf->particle[1].pos.x,conf->particle[1].pos.y,conf->particle[1].pos.z); printf("part0 chaint %ld chainn %ld type %d\n",conf->particle[0].chaint,conf->particle[0].chainn,conf->particle[0].type); */ localmpi.accepted = receivedmpi.accepted; conf->box = receivedmpi.box; conf->syscm = receivedmpi.syscm; memcpy(conf->particle,temppart,topo->npart*sizeof(struct particles)); edriftchanges = receivedmpi.energy - localmpi.energy; edriftchanges += sim->press * 
(receivedmpi.volume - localmpi.volume) - (double)topo->npart * log(receivedmpi.volume / localmpi.volume) / sim->temper; if ( sim->wlm[0] >0 ) { for (wli=0;wli<sim->wl.wlmdim;wli++) { sim->wl.neworder[wli] = receivedmpi.wl_order[wli]; } wlaccept(sim->wlm[0],&sim->wl); //exchange wl data mesh size and radius hole s for (wli=0;wli<sim->wl.wlmdim;wli++) { switch (sim->wlm[wli]) { case 2: /*it is complicated to send because of different sizes we would have to send sizes first and realocate corrrect mesh size and then send data it is better to recalculate (a bit slower though)*/ mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize, topo->npart, conf, sim); break; case 5: //radiushole_all(topo,conf,sim,wli,&(conf->syscm)); sim->wl.radiusholeold = (long*) realloc(sim->wl.radiusholeold,sizeof(long)*receivedmpi.radiusholemax); MPI_Recv(sim->wl.radiusholeold,receivedmpi.radiusholemax, MPI_LONG, sim->mpirank-1, MPI_ANY_TAG, MPI_COMM_WORLD, &status); MPI_Send(sim->wl.radiushole,sim->wl.radiusholemax, MPI_LONG, sim->mpirank-1, count, MPI_COMM_WORLD); longarray_cpy(&sim->wl.radiushole,&sim->wl.radiusholeold,sim->wl.radiusholemax,receivedmpi.radiusholemax); sim->wl.radiusholemax=receivedmpi.radiusholemax; break; case 6: //radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos)); sim->wl.radiusholeold = (long*) realloc(sim->wl.radiusholeold,sizeof(long)*receivedmpi.radiusholemax); MPI_Recv(sim->wl.radiusholeold,receivedmpi.radiusholemax, MPI_LONG, sim->mpirank-1, MPI_ANY_TAG, MPI_COMM_WORLD, &status); MPI_Send(sim->wl.radiushole,sim->wl.radiusholemax, MPI_LONG, sim->mpirank-1, count, MPI_COMM_WORLD); longarray_cpy(&sim->wl.radiushole,&sim->wl.radiusholeold,sim->wl.radiusholemax,receivedmpi.radiusholemax); sim->wl.radiusholemax=receivedmpi.radiusholemax; break; case 7: //contparticles_all(topo,conf,sim,wli); MPI_Recv(&(sim->wl.partincontactold),1, MPI_LONG, sim->mpirank-1, MPI_ANY_TAG, MPI_COMM_WORLD, &status); MPI_Send(&(sim->wl.partincontact),1, MPI_LONG, sim->mpirank-1, count, 
MPI_COMM_WORLD); sim->wl.partincontact=sim->wl.partincontactold; break; } } } free(temppart); } else { sim->mpiexch.rej++; if ( sim->wlm[0] > 0 ) { sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]] -= sim->wl.alpha; sim->wl.hist[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]++; } } } } else { if (sim->mpirank+1 < sim->mpinprocs) { /*there is above process*/ MPI_Recv(&receivedmpi, 1, MPI_exchange, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD, &status); MPI_Recv(recwlweights, sizewl, MPI_DOUBLE, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD, &status); /*we got new configuration*/ //printf("received data: rank: %d energy: %f volume: %f \n",sim->mpirank,receivedmpi.energy,receivedmpi.volume); /*evaluate if accepte or reject the configuration*/ /*acc = exp( (1/sim->temper - 1/(sim->temper + sim.dtemp)) * (E_here - E_received) + (sim->press /sim->temper - pressure_received /(sim.temper + sim->dtemp)) * (V_here - V_received) if pressure the same it it simplier*/ reject = FALSE; change = (1/sim->temper - 1/(sim->temper + sim->dtemp)) * (localmpi.energy - receivedmpi.energy); //printf("acceptance decision: change: %f localE: %f receivedE: %f tempf: %f \n",change,localmpi.energy,receivedmpi.energy,(1/sim->temper - 1/(sim->temper + sim->dtemp))); change += (sim->press/sim->temper - (sim->press + sim->dpress)/(sim->temper + sim->dtemp)) * (localmpi.volume - receivedmpi.volume); //printf("pressf: %f \n",(sim->press/sim->temper - (sim->press + sim->dpress)/(sim->temper + sim->dtemp))); if (sim->wlm[0] > 0) { localwl = sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]; receivedwl = receivedmpi.wl_order[0] + receivedmpi.wl_order[1]*sim->wl.length[0]; //fprintf(stdout,"decide wl %ld %ld %ld energychange: %f \n", receivedmpi.wl_order[0], receivedmpi.wl_order[1], receivedwl, change ); //fprintf(stdout,"local weights %ld %f %ld %f \n",localwl,sim->wl.weights[localwl],receivedwl,sim->wl.weights[receivedwl]); change += 
(-sim->wl.weights[localwl] + sim->wl.weights[receivedwl] )/sim->temper + ( -recwlweights[receivedwl] + recwlweights[localwl])/(sim->temper + sim->dtemp) ; //fprintf(stdout,"wlchange %f \n\n",change); } if ( (!(reject)) && ( (change > 0) || (ran2(&seed) < exp(change)) ) ) { /* Exchange ACCEPTED send local stuff*/ //printf("exchange accepted \n"); sim->mpiexch.acc++; localmpi.accepted = 1; conf->box = receivedmpi.box; conf->syscm = receivedmpi.syscm; edriftchanges = receivedmpi.energy - localmpi.energy; edriftchanges += sim->press * (receivedmpi.volume - localmpi.volume) - (double)topo->npart * log(receivedmpi.volume / localmpi.volume) / sim->temper; //printf("edrift %f\n",edriftchanges); if ( sim->wlm[0] > 0 ) { for (wli=0;wli<sim->wl.wlmdim;wli++) { sim->wl.neworder[wli] = receivedmpi.wl_order[wli]; } wlaccept(sim->wlm[0],&sim->wl); } MPI_Send(&localmpi, 1, MPI_exchange, sim->mpirank+1, count, MPI_COMM_WORLD); //printf("send data: rank: %d energy: %f volume: %f pressure: %f \n",sim->mpirank,localmpi.energy,localmpi.volume,localmpi.pressure); /*send and receive configuration*/ MPI_Send(conf->particle, topo->npart, MPI_Particle, sim->mpirank+1, count, MPI_COMM_WORLD); /* printf("send data: rank: %d\n",sim->mpirank); printf("part0 x %f y %f z %f\n",conf->particle[0].pos.x,conf->particle[0].pos.y,conf->particle[0].pos.z); printf("part1 x %f y %f z %f\n",conf->particle[1].pos.x,conf->particle[1].pos.y,conf->particle[1].pos.z); printf("part0 chaint %ld chainn %ld type %d\n",conf->particle[0].chaint,conf->particle[0].chainn,conf->particle[0].type); */ MPI_Recv(conf->particle, topo->npart, MPI_Particle, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD,&status); /* printf("recieved data: rank: %d\n",sim->mpirank); printf("part0 x %f y %f z %f\n",conf->particle[0].pos.x,conf->particle[0].pos.y,conf->particle[0].pos.z); printf("part1 x %f y %f z %f\n",conf->particle[1].pos.x,conf->particle[1].pos.y,conf->particle[1].pos.z); printf("part0 chaint %ld chainn %ld type 
%d\n",conf->particle[0].chaint,conf->particle[0].chainn,conf->particle[0].type); */ if ( sim->wlm[0] > 0 ) { //exchange wl data mesh size and radius hole s for (wli=0;wli<sim->wl.wlmdim;wli++) { switch (sim->wlm[wli]) { case 2: /*it is complicated to send because of different sizes we would have to send sizes first and realocate corrrect mesh size and then send data it is better to recalculate (a bit slower though)*/ mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize, topo->npart, conf, sim); break; case 5: //radiushole_all(topo,conf,sim,wli,&(conf->syscm)); sim->wl.radiusholeold = (long*) realloc(sim->wl.radiusholeold,sizeof(long)*receivedmpi.radiusholemax); MPI_Send(sim->wl.radiushole,sim->wl.radiusholemax, MPI_LONG, sim->mpirank+1, count, MPI_COMM_WORLD); MPI_Recv(sim->wl.radiusholeold,receivedmpi.radiusholemax, MPI_LONG, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD, &status); longarray_cpy(&sim->wl.radiushole,&sim->wl.radiusholeold,sim->wl.radiusholemax,receivedmpi.radiusholemax); sim->wl.radiusholemax=receivedmpi.radiusholemax; break; case 6: //radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos)); sim->wl.radiusholeold = (long*) realloc(sim->wl.radiusholeold,sizeof(long)*receivedmpi.radiusholemax); MPI_Send(sim->wl.radiushole,sim->wl.radiusholemax, MPI_LONG, sim->mpirank+1, count, MPI_COMM_WORLD); MPI_Recv(sim->wl.radiusholeold,receivedmpi.radiusholemax, MPI_LONG, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD, &status); longarray_cpy(&sim->wl.radiushole,&sim->wl.radiusholeold,sim->wl.radiusholemax,receivedmpi.radiusholemax); sim->wl.radiusholemax=receivedmpi.radiusholemax; break; case 7: //contparticles_all(topo,conf,sim,wli); MPI_Send(&(sim->wl.partincontact),1, MPI_LONG, sim->mpirank+1, count, MPI_COMM_WORLD); MPI_Recv(&(sim->wl.partincontact),1, MPI_LONG, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD, &status); break; } } } } else { /*if exchange rejected send back info */ //printf("exchange rejected\n"); sim->mpiexch.rej++; MPI_Send(&localmpi, 1, MPI_exchange, 
sim->mpirank+1, count, MPI_COMM_WORLD); if ( sim->wlm[0] > 0 ) { sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]] -= sim->wl.alpha; sim->wl.hist[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]++; } } } } if ( (localmpi.accepted) && (sim->pairlist_update) ) gen_pairlist(topo, sim, conf); MPI_Type_free(&MPI_exchange); MPI_Type_free(&MPI_Particle); MPI_Type_free(&MPI_vector); free(recwlweights); #endif return edriftchanges; } /*int mpi_newdatatypes() { int i; struct vector vec; struct particles part; struct mpiexchangedata exch; MPI_Aint dispstart; MPI_Datatype MPI_vector; MPI_Datatype type[3] = {MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE}; int blocklen[3] = {1, 1, 1}; MPI_Aint disp[3]; MPI_Address( &vec, &dispstart); MPI_Address( &(vec.x), &disp[0]); MPI_Address( &(vec.y), &disp[1]); MPI_Address( &(vec.z), &disp[2]); for (i=0; i <3; i++) disp[i] -= dispstart; MPI_Type_struct( 3, blocklen, disp, type, &MPI_vector); MPI_Type_commit( &MPI_vector); MPI_Datatype MPI_Particle; MPI_Datatype type2[11] = {MPI_vector,MPI_vector,MPI_vector,MPI_vector,MPI_vector, MPI_LONG, MPI_LONG, MPI_INT,MPI_INT,MPI_DOUBLE, MPI_INT}; int blocklen2[11] = {1, 1, 2,4,2,1,1,1,1,1,1,}; MPI_Aint disp2[11]; MPI_Address( &part, &dispstart); MPI_Address( &(part.pos), &disp2[0]); MPI_Address( &(part.dir), &disp2[1]); MPI_Address( &(part.patchdir), &disp2[2]); MPI_Address( &(part.patchsides), &disp2[3]); MPI_Address( &(part.chdir), &disp2[4]); MPI_Address( &(part.chaint), &disp2[5]); MPI_Address( &(part.chainn), &disp2[6]); MPI_Address( &(part.type), &disp2[7]); MPI_Address( &(part.switchtype), &disp2[8]); MPI_Address( &(part.delta_mu), &disp2[9]); MPI_Address( &(part.switched), &disp2[10]); for (i=0; i <11; i++) disp2[i] -= dispstart; MPI_Type_struct( 11, blocklen2, disp2, type2, &MPI_Particle); MPI_Type_commit( &MPI_Particle); MPI_Datatype MPI_exchange; MPI_Datatype type3[5] = {MPI_vector, MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE, MPI_INT}; int blocklen3[5] = {1, 1, 1, 1, 
1}; MPI_Aint disp3[5]; MPI_Address( &exch, &dispstart); MPI_Address( &(exch.box), &disp3[0]); MPI_Address( &(exch.energy), &disp3[1]); MPI_Address( &(exch.volume), &disp3[2]); MPI_Address( &(exch.pressure), &disp3[3]); MPI_Address( &(exch.accepted), &disp3[4]); for (i=0; i <5; i++) disp3[i] -= dispstart; MPI_Type_struct( 5, blocklen3, disp3, type3, &MPI_exchange); MPI_Type_commit( &MPI_exchange); return 0; }*/ /*................................................................................*/ /*................................................................................*/ /*....................END OF MOVES, INTERACTION FUNCTIONS FOLLOW..................*/ /*................................................................................*/ /*..............................................................................*/ /* Determines total energy of two spherocylinders type PSC PSC */ double e_psc_psc(struct interacts * interact) { double atrenergy, repenergy; void closestdist(struct interacts *); double erepulsive(struct interacts *); double eattractive_psc_psc(struct interacts *,int,int); closestdist(interact); repenergy = erepulsive(interact); if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) ) atrenergy = 0.0; else { BOOL firstCH=FALSE, secondCH=FALSE; struct vector olddir1 = interact->part1->dir; struct vector olddir2 = interact->part2->dir; if ( (interact->param->geotype[0] == CHPSC)||(interact->param->geotype[0] == TCHPSC) ) firstCH = TRUE; if ( (interact->param->geotype[1] == CHPSC)||(interact->param->geotype[1] == TCHPSC) ) secondCH = TRUE; if (firstCH) interact->part1->dir = interact->part1->chdir[0]; if (secondCH) interact->part2->dir = interact->part2->chdir[0]; if ((firstCH) || (secondCH) ) { closestdist(interact); } atrenergy = eattractive_psc_psc(interact,0,0); /*addition of interaction of second patches*/ if ( (interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) || 
(interact->param->geotype[1] == TPSC) ||(interact->param->geotype[1] == TCHPSC) ) {
            /* at least one particle carries a second patch: add the extra patch-patch terms */
            BOOL firstT=FALSE, secondT=FALSE;
            if ( (interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) ) firstT = TRUE;
            if ( (interact->param->geotype[1] == TPSC) ||(interact->param->geotype[1] == TCHPSC) ) secondT = TRUE;
            if (firstT) {
                if (firstCH) {
                    /* chiral particle: switch to the direction belonging to its second patch */
                    interact->part1->dir = interact->part1->chdir[1];
                    closestdist(interact);
                }
                atrenergy += eattractive_psc_psc(interact,1,0);
            }
            if ( (firstT) && (secondT) ) {
                if (secondCH) {
                    interact->part2->dir = interact->part2->chdir[1];
                    closestdist(interact);
                }
                atrenergy += eattractive_psc_psc(interact,1,1);
            }
            if (secondT) {
                if (firstT && firstCH ) {
                    /* particle 1 back to its first-patch direction for the (0,1) pair */
                    interact->part1->dir = interact->part1->chdir[0];
                    closestdist(interact);
                }
                atrenergy += eattractive_psc_psc(interact,0,1);
            }
        }
        /* restore the directions that were overwritten for chiral particles */
        if (firstCH) interact->part1->dir = olddir1;
        if (secondCH) interact->part2->dir = olddir2;
    }

    return repenergy+atrenergy;
}

/* Determines attractive energy of two spherocylinders type PSC PSC.
   patchnum1/patchnum2 select which patch of part1/part2 is evaluated.
   Returns 0.0 as soon as either spherocylinder lies entirely outside
   the other's patch within the cutoff. */
double eattractive_psc_psc(struct interacts * interact,int patchnum1,int patchnum2)
{
    int i, intrs;
    double rcut, atrenergy, ndist;
    double v1, v2, f0, f1, f2, T1, T2, S1, S2, a;
    double intersections[5];
    struct vector vec1, vec2, vec_intrs, vec_mindist;
    struct vector vec_sub(struct vector, struct vector);
    struct vector vec_sum(struct vector, struct vector);
    struct vector vec_create(double, double, double);
    struct vector vec_scale(struct vector, double);
    struct vector vec_perpproject(struct vector *, struct vector*);
    struct vector mindist_segments(struct vector, double, struct vector, double, struct vector);
    void normalise(struct vector *);
    int psc_intersect(struct particles *, struct particles *, double, double, struct vector, double *,double, struct ia_param *, int which, int patchnum);
    double fanglscale(double, struct ia_param *, int which);

    rcut = interact->param->rcut;
    //interact->halfl = interact->param->half_len[0];
    //DEBUG_SIM("halfl = %lf", interact->halfl);
    for(i=0;i<5;i++)
        intersections[i]=0;
    //cospatch = param.pcanglsw;
    //cospatchinr = param.pcangl;
    /*1- do intersections of spherocylinder2 with patch of spherocylinder1 at cut distance C*/
    //DEBUG_SIM("first intersection");
    intrs=psc_intersect(interact->part1,interact->part2,interact->param->half_len[0],interact->param->half_len[1],interact->r_cm, intersections, rcut, interact->param,0, patchnum1);
    if (intrs <2){
        //DEBUG_SIM("No intersection :(");
        return 0.0; /*sc is all outside patch, attractive energy is 0*/
    }
    T1=intersections[0]; /*points on sc2*/
    T2=intersections[1];
    /*2- now do the same opposite way psc1 in patch of psc2*/
    for(i=0;i<5;i++)
        intersections[i]=0;
    //DEBUG_SIM("get vector");
    vec1=vec_scale(interact->r_cm,-1.0);
    //DEBUG_SIM("second intersection");
    intrs=psc_intersect(interact->part2,interact->part1,interact->param->half_len[1],interact->param->half_len[0],vec1, intersections, rcut, interact->param,1, patchnum2);
    if (intrs <2)
        return 0.0; /*sc is all outside patch, attractive energy is 0*/
    S1=intersections[0]; /*points on sc1*/
    S2=intersections[1];
    /*3- scaling function1: dependence on the length of intersections*/
    v1=fabs(S1-S2);
    v2=fabs(T1-T2);
    f0=0.5*(v1+v2);
    /*4a- with two intersection pieces calculate vector between their CM -this is for angular orientation*/
    vec1=vec_scale(interact->part1->dir,(S1+S2)*0.5);
    vec2=vec_scale(interact->part2->dir,(T1+T2)*0.5);
    vec_intrs.x=vec2.x-vec1.x-interact->r_cm.x;
    vec_intrs.y=vec2.y-vec1.y-interact->r_cm.y;
    vec_intrs.z=vec2.z-vec1.z-interact->r_cm.z;
    /*vec_intrs should be from sc1 to sc2*/
    //fprintf (stderr, "segments_CM: %.8f %.8f %.8f \n",vec_intrs.x,vec_intrs.y,vec_intrs.z);
    /*4b - calculate closest distance attractive energy from it*/
    vec_mindist = mindist_segments(interact->part1->dir,v1,interact->part2->dir,v2,vec_intrs);
    //fprintf (stderr, "segments closest dist: %.8f %.8f %.8f \n",vec_mindist.x,vec_mindist.y,vec_mindist.z);
    ndist=sqrt(DOT(vec_mindist,vec_mindist));
    //dist=DOT(vec_intrs,vec_intrs);
    /* distance part of the attraction: flat well up to pdis, cos^2 switch over pswitch */
    if (ndist < interact->param->pdis)
        atrenergy = -interact->param->epsilon;
    //atrenergy = -1.0;
    else {
        atrenergy = cos(PIH*(ndist-interact->param->pdis)/interact->param->pswitch);
        atrenergy *= -atrenergy*interact->param->epsilon ;
    }
    /*5- scaling function2: angular dependence of patch1*/
    vec1=vec_scale(vec_intrs,1.0);
    //vec1=vec_scale(vec_mindist,-1.0);
    vec1=vec_perpproject(&vec1, &interact->part1->dir);
    normalise(&vec1);
    a = DOT(vec1,interact->part1->patchdir[patchnum1]);
    f1 = fanglscale(a,interact->param, 0+2*patchnum1);
    /*6- scaling function3: angular dependence of patch2*/
    vec1=vec_scale(vec_intrs,-1.0);
    //vec1=vec_scale(vec_mindist,1.0);
    vec1=vec_perpproject(&vec1, &interact->part2->dir);
    normalise(&vec1);
    a = DOT(vec1,interact->part2->patchdir[patchnum2]);
    f2 = fanglscale(a,interact->param, 1+2*patchnum2);
    //printf("v1: %f v2: %f f0: %f f1: %f f2: %f ener: %f\n",v1,v2,f0,f1,f2,atrenergy);
    /*7- put it all together*/
    atrenergy *=f0*f1*f2;
    //if (atrenergy < 0) printf ("atraction %f\n",atrenergy);
    // fprintf (stderr, "attraction %.8f \n",atrenergy);
    // exit(1);
    return atrenergy;
}

/* Angular scaling factor of the patch attraction.
   a = r_ij * n_i (cosine between the inter-particle direction and the patch normal).
   Returns 0 outside the switching cone (a <= pcanglsw), 1 inside the
   full-strength cone (a >= pcangl), and interpolates linearly in between. */
double fanglscale(double a, struct ia_param * param, int which)
{
    double f;
    // TODO for different types
    if (a <= param->pcanglsw[which])
        f=0.0;
    else {
        if (a >= param->pcangl[which])
            f=1.0;
        else {
            f = 0.5 - ((param->pcanglsw[which] + param->pcangl[which])*0.5 - a )/(param->pcangl[which] - param->pcanglsw[which]);
        }
    }
    return f;
}

/*CPSC..............................................................................*/

/* Determines total energy of two spherocylinders of type 3 -cylindrical psc -CPSC.
   Repulsive part always; attractive part only within rcut and for nonzero epsilon.
   Chiral variants (CHCPSC/TCHCPSC) temporarily rotate the particle direction to the
   patch direction and restore it afterwards. */
double e_cpsc_cpsc(struct interacts * interact)
{
    double atrenergy, repenergy;
    void closestdist(struct interacts *);
    double erepulsive(struct interacts *);
    double eattractive_cpsc_cpsc(struct interacts *,int,int);

    //DEBUG_SIM("do energy 33") ;
    closestdist(interact);
    repenergy = erepulsive(interact);
    //DEBUG_SIM("got the rep. energy");
    if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) )
        atrenergy = 0.0;
    else {
        BOOL firstCH=FALSE, secondCH=FALSE;
        /* remember the real directions so they can be restored after chiral rotations */
        struct vector olddir1 = interact->part1->dir;
        struct vector olddir2 = interact->part2->dir;
        if ( (interact->param->geotype[0] == CHCPSC)||(interact->param->geotype[0] == TCHCPSC) ) firstCH = TRUE;
        if ( (interact->param->geotype[1] == CHCPSC)||(interact->param->geotype[1] == TCHCPSC) ) secondCH = TRUE;
        if(firstCH) interact->part1->dir = interact->part1->chdir[0];
        if(secondCH) interact->part2->dir = interact->part2->chdir[0];
        if ((firstCH) || (secondCH) ) {
            closestdist(interact);
        }
        atrenergy = eattractive_cpsc_cpsc(interact,0,0);
        /*addition of interaction of second patches*/
        if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) || (interact->param->geotype[1] == TCPSC) ||(interact->param->geotype[1] == TCHCPSC) ) {
            BOOL firstT=FALSE, secondT=FALSE;
            if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) ) firstT = TRUE;
            if ( (interact->param->geotype[1] == TCPSC) ||(interact->param->geotype[1] == TCHCPSC) ) secondT = TRUE;
            if (firstT) {
                if (firstCH) {
                    interact->part1->dir = interact->part1->chdir[1];
                    closestdist(interact);
                }
                atrenergy += eattractive_cpsc_cpsc(interact,1,0);
            }
            if ( (firstT) && (secondT) ) {
                if (secondCH) {
                    interact->part2->dir = interact->part2->chdir[1];
                    closestdist(interact);
                }
                atrenergy += eattractive_cpsc_cpsc(interact,1,1);
            }
            if (secondT) {
                if (firstT && firstCH ) {
                    interact->part1->dir = interact->part1->chdir[0];
                    closestdist(interact);
                }
                atrenergy += eattractive_cpsc_cpsc(interact,0,1);
            }
        }
        if (firstCH) interact->part1->dir = olddir1;
        if (secondCH) interact->part2->dir = olddir2;
    }
    return repenergy+atrenergy;
}

/* Determines attractive energy of two spherocylinders of type 3 -cylindrical psc -CPSC.
   Same seven-step construction as eattractive_psc_psc but with cpsc_intersect. */
double eattractive_cpsc_cpsc(struct interacts * interact, int patchnum1, int patchnum2)
{
    int i, intrs;
    double rcut,
atrenergy, v1, v2, f0, f1, f2, T1, T2, S1, S2, a, ndist;
    double intersections[5];
    struct vector vec1, vec2, vec_intrs, vec_mindist;
    struct vector vec_sub(struct vector, struct vector);
    struct vector vec_sum(struct vector, struct vector);
    struct vector vec_create(double, double, double);
    struct vector vec_scale(struct vector, double);
    struct vector vec_perpproject(struct vector*, struct vector*);
    struct vector mindist_segments(struct vector, double, struct vector, double, struct vector);
    void normalise(struct vector *);
    int cpsc_intersect(struct particles *, struct particles *, double, double, struct vector, double *,double, struct ia_param *, int which, int patchnum);
    double fanglscale(double, struct ia_param *, int which);

    rcut = interact->param->rcut;
    // interact->halfl = interact->param->half_len[0];
    for(i=0;i<5;i++)
        intersections[i]=0;
    /*1- do intersections of spherocylinder2 with patch of spherocylinder1 at cut distance C*/
    intrs=cpsc_intersect(interact->part1,interact->part2,interact->param->half_len[0],interact->param->half_len[1],interact->r_cm, intersections, rcut, interact->param,0, patchnum1);
    if (intrs <2)
        return 0.0; /*sc is all outside patch, attractive energy is 0*/
    T1=intersections[0]; /*points on sc2*/
    T2=intersections[1];
    /*2- now do the same opposite way psc1 in patch of psc2*/
    for(i=0;i<5;i++)
        intersections[i]=0;
    vec1=vec_scale(interact->r_cm,-1.0);
    intrs=cpsc_intersect(interact->part2,interact->part1,interact->param->half_len[1],interact->param->half_len[0],vec1, intersections, rcut, interact->param,1, patchnum2);
    if (intrs <2)
        return 0.0; /*sc is all outside patch, attractive energy is 0*/
    S1=intersections[0]; /*points on sc1*/
    S2=intersections[1];
    /*3- scaling function1: dependence on the length of intersections*/
    v1=fabs(S1-S2);
    v2=fabs(T1-T2);
    f0=0.5*(v1+v2);
    /*4a- with two intersection pieces calculate vector between their CM -this is for angular orientation*/
    vec1=vec_scale(interact->part1->dir,(S1+S2)*0.5);
    vec2=vec_scale(interact->part2->dir,(T1+T2)*0.5);
    vec_intrs.x=vec2.x-vec1.x-interact->r_cm.x;
    vec_intrs.y=vec2.y-vec1.y-interact->r_cm.y;
    vec_intrs.z=vec2.z-vec1.z-interact->r_cm.z;
    /*vec_intrs should be from sc1 to sc2*/
    // fprintf (stderr, "segments_CM: %.8f %.8f %.8f \n",vec_intrs.x,vec_intrs.y,vec_intrs.z);
    /*4b - calculate closest distance attractive energy from it*/
    vec_mindist = mindist_segments(interact->part1->dir,v1,interact->part2->dir,v2,vec_intrs);
    // fprintf (stderr, "segments closest dist: %.8f %.8f %.8f \n",vec_mindist.x,vec_mindist.y,vec_mindist.z);
    ndist=sqrt(DOT(vec_mindist,vec_mindist));
    //dist=DOT(vec_intrs,vec_intrs);
    /* distance part of the attraction: flat well up to pdis, cos^2 switch over pswitch */
    if (ndist < interact->param->pdis)
        atrenergy = -interact->param->epsilon;
    else {
        atrenergy = cos(PIH*(ndist-interact->param->pdis)/interact->param->pswitch);
        atrenergy *= -atrenergy*interact->param->epsilon ;
    }
    /*5- scaling function2: angular dependence of patch1*/
    vec1=vec_scale(vec_intrs,1.0);
    //vec1=vec_scale(vec_mindist,-1.0);
    vec1=vec_perpproject(&vec1, &interact->part1->dir);
    normalise(&vec1);
    a = DOT(vec1,interact->part1->patchdir[patchnum1]);
    f1 = fanglscale(a,interact->param, 0+2*patchnum1);
    /*6- scaling function3: angular dependence of patch2*/
    vec1=vec_scale(vec_intrs,-1.0);
    //vec1=vec_scale(vec_mindist,1.0);
    vec1=vec_perpproject(&vec1, &interact->part2->dir);
    normalise(&vec1);
    a = DOT(vec1,interact->part2->patchdir[patchnum2]);
    f2 = fanglscale(a,interact->param, 1+2*patchnum2);
    /*7- put it all together*/
    atrenergy *=f0*f1*f2;
    //if (atrenergy < 0) printf ("atraction %f\n",atrenergy);
    // fprintf (stderr, "attraction %.8f \n",atrenergy);
    // exit(1);
    return atrenergy;
}

/*..............................................................................*/

/* Determines total energy of spherocylinders type PSC and CPSC (mixed pair).
   Chiral variants of either geometry rotate to the patch direction; second
   patches (T* geotypes) add their pairwise combinations as in e_psc_psc. */
double e_psc_cpsc(struct interacts * interact)
{
    double atrenergy, repenergy;
    void closestdist(struct interacts *);
    double erepulsive(struct interacts *);
    double eattractive_psc_cpsc(struct interacts
*,int,int);

    //DEBUG_SIM("do energy 23") ;
    closestdist(interact);
    repenergy = erepulsive(interact);
    //DEBUG_SIM("got the rep. energy");
    /* outside the cutoff, or a non-attractive pair: only repulsion remains */
    if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) )
        atrenergy = 0.0;
    else {
        BOOL firstCH=FALSE, secondCH=FALSE;
        struct vector olddir1 = interact->part1->dir;
        struct vector olddir2 = interact->part2->dir;
        /* chiral geotypes keep their patch axis in chdir[]; evaluate with the
           particle direction temporarily rotated onto it, restore at the end */
        if ((interact->param->geotype[0] == CHPSC) || (interact->param->geotype[0] == CHCPSC)|| (interact->param->geotype[0] == TCHPSC) || (interact->param->geotype[0] == TCHCPSC) )
            firstCH = TRUE;
        if ((interact->param->geotype[1] == CHPSC) || (interact->param->geotype[1] == CHCPSC)|| (interact->param->geotype[1] == TCHPSC) || (interact->param->geotype[1] == TCHCPSC) )
            secondCH = TRUE;
        if(firstCH) interact->part1->dir = interact->part1->chdir[0];
        if(secondCH) interact->part2->dir = interact->part2->chdir[0];
        if ((firstCH) || (secondCH) ) {
            closestdist(interact);
        }
        atrenergy = eattractive_psc_cpsc(interact,0,0);
        /*addition of interaction of second patches*/
        if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) || (interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) ||
             (interact->param->geotype[1] == TCPSC) ||(interact->param->geotype[1] == TCHCPSC) || (interact->param->geotype[1] == TPSC) || (interact->param->geotype[1] == TCHPSC) ) {
            BOOL firstT=FALSE, secondT=FALSE;
            if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) || (interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) )
                firstT = TRUE;
            if ( (interact->param->geotype[1] == TCPSC) || (interact->param->geotype[1] == TCHCPSC) || (interact->param->geotype[1] == TPSC) || (interact->param->geotype[1] == TCHPSC) )
                secondT = TRUE;
            /* patch 1 of particle 1 vs patch 0 of particle 2 */
            if (firstT) {
                if (firstCH) {
                    interact->part1->dir = interact->part1->chdir[1];
                    closestdist(interact);
                }
                atrenergy += eattractive_psc_cpsc(interact,1,0);
            }
            /* patch 1 vs patch 1 */
            if ( (firstT) && (secondT) ) {
                if (secondCH) {
                    interact->part2->dir = interact->part2->chdir[1];
                    closestdist(interact);
                }
                atrenergy += eattractive_psc_cpsc(interact,1,1);
            }
            /* patch 0 of particle 1 vs patch 1 of particle 2 */
            if (secondT) {
                if (firstT && firstCH ) {
                    interact->part1->dir = interact->part1->chdir[0];
                    closestdist(interact);
                }
                atrenergy += eattractive_psc_cpsc(interact,0,1);
            }
        }
        /* restore the original particle directions */
        if (firstCH) interact->part1->dir = olddir1;
        if (secondCH) interact->part2->dir = olddir2;
    }
    return repenergy+atrenergy;
}

/* Determines attractive energy of spherocylinders type PSC and CPSC */
double eattractive_psc_cpsc(struct interacts * interact,int patchnum1,int patchnum2)
{
    int i, intrs;
    double rcut, atrenergy, ndist;
    double v1, v2, f0, f1, f2, T1, T2, S1, S2, a;
    double intersections[5];
    struct vector vec1, vec2, vec_intrs, vec_mindist;
    /* forward declarations of the vector helpers used below */
    struct vector vec_sub(struct vector, struct vector);
    struct vector vec_sum(struct vector, struct vector);
    struct vector vec_create(double, double, double);
    struct vector vec_scale(struct vector, double);
    struct vector vec_perpproject(struct vector*, struct vector*);
    struct vector mindist_segments(struct vector, double, struct vector, double, struct vector);
    void normalise(struct vector *);
    int psc_intersect(struct particles *, struct particles *, double, double, struct vector, double *,double, struct ia_param *, int which,int patchnum);
    int cpsc_intersect(struct particles *, struct particles *, double, double, struct vector, double *,double, struct ia_param *, int which,int patchnum);
    double fanglscale(double, struct ia_param *, int which);

    rcut = interact->param->rcut;
    //interact->halfl = interact->param->half_len[0];
    //DEBUG_SIM("halfl = %lf", interact->halfl);
    for(i=0;i<5;i++) intersections[i]=0;
    BOOL first;
    /* PSC-like geotypes get the spherical-cap intersection routine for particle 1 */
    if ( (interact->param->geotype[0] == PSC)||(interact->param->geotype[0] == CHPSC)||(interact->param->geotype[0] == TPSC)||(interact->param->geotype[0] == TCHPSC) ){
        first = TRUE;
    } else {
        first = FALSE;
    }
    //cospatch = param.pcanglsw;
    //cospatchinr = param.pcangl;
    /*1- do intersections of spherocylinder2 with patch of
spherocylinder1 at. cut distance C*/
    //DEBUG_SIM("first intersection");
    if (first) {
        intrs=psc_intersect(interact->part1,interact->part2,interact->param->half_len[0],interact->param->half_len[1],interact->r_cm, intersections, rcut, interact->param,0, patchnum1);
    } else {
        intrs=cpsc_intersect(interact->part1,interact->part2,interact->param->half_len[0],interact->param->half_len[1],interact->r_cm, intersections, rcut, interact->param,0, patchnum1);
    }
    //DEBUG_SIM("first intersection: done");
    if (intrs <2){
        //DEBUG_SIM("No intersection :(");
        return 0.0; /*sc is all outside patch, attractive energy is 0*/
    }
    T1=intersections[0]; /*points on sc2*/
    T2=intersections[1];
    /*2- now do the same oposite way psc1 in patch of psc2*/
    for(i=0;i<5;i++) intersections[i]=0;
    //DEBUG_SIM("get vector");
    vec1=vec_scale(interact->r_cm,-1.0);
    //DEBUG_SIM("second intersection");
    /* NOTE(review): the routines are swapped for particle 2 — presumably this
       function serves mixed PSC/CPSC pairs, so particle 2 has the other
       geometry; confirm against the dispatch that selects this function */
    if (first) {
        intrs=cpsc_intersect(interact->part2,interact->part1,interact->param->half_len[1],interact->param->half_len[0],vec1, intersections, rcut, interact->param,1, patchnum2);
    } else {
        intrs=psc_intersect(interact->part2,interact->part1,interact->param->half_len[1],interact->param->half_len[0],vec1, intersections, rcut, interact->param,1, patchnum2);
    }
    if (intrs <2) return 0.0; /*sc is all outside patch, attractive energy is 0*/
    S1=intersections[0]; /*points on sc1*/
    S2=intersections[1];
    /*3- scaling function1: dependence on the length of intersetions*/
    v1=fabs(S1-S2);
    v2=fabs(T1-T2);
    f0=0.5*(v1+v2);
    /*4a- with two intersection pices calculate vector between their CM -this is for angular orientation*/
    vec1=vec_scale(interact->part1->dir,(S1+S2)*0.5);
    vec2=vec_scale(interact->part2->dir,(T1+T2)*0.5);
    vec_intrs.x=vec2.x-vec1.x-interact->r_cm.x;
    vec_intrs.y=vec2.y-vec1.y-interact->r_cm.y;
    vec_intrs.z=vec2.z-vec1.z-interact->r_cm.z;
    /*vec_intrs should be from sc1 to sc2*/
    // fprintf (stderr, "segments_CM: %.8f %.8f %.8f \n",vec_intrs.x,vec_intrs.y,vec_intrs.z);
    /*4b - calculate closest distance attractive energy
from it*/
    vec_mindist = mindist_segments(interact->part1->dir,v1,interact->part2->dir,v2,vec_intrs);
    // fprintf (stderr, "segments closest dist: %.8f %.8f %.8f \n",vec_mindist.x,vec_mindist.y,vec_mindist.z);
    ndist=sqrt(DOT(vec_mindist,vec_mindist));
    //dist=DOT(vec_intrs,vec_intrs);
    /* cosine-squared switch between full attraction and zero at the cutoff */
    if (ndist < interact->param->pdis) atrenergy = -interact->param->epsilon;
    //atrenergy = -1.0;
    else {
        atrenergy = cos(PIH*(ndist-interact->param->pdis)/interact->param->pswitch);
        atrenergy *= -atrenergy*interact->param->epsilon ;
    }
    /*5- scaling function2: angular dependence of patch1*/
    vec1=vec_scale(vec_intrs,1.0);
    //vec1=vec_scale(vec_mindist,-1.0);
    vec1=vec_perpproject(&vec1, &interact->part1->dir);
    normalise(&vec1);
    a = DOT(vec1,interact->part1->patchdir[patchnum1]);
    f1 = fanglscale(a,interact->param, 0+2*patchnum1);
    /*6- scaling function3: angular dependence of patch2*/
    vec1=vec_scale(vec_intrs,-1.0);
    //vec1=vec_scale(vec_mindist,1.0);
    vec1=vec_perpproject(&vec1, &interact->part2->dir);
    normalise(&vec1);
    a = DOT(vec1,interact->part2->patchdir[patchnum2]);
    f2 = fanglscale(a,interact->param, 1+2*patchnum2);
    /*7- put it all together*/
    atrenergy *=f0*f1*f2;
    //if (atrenergy < 0) printf ("atraction %f\n",atrenergy);
    // fprintf (stderr, "attraction %.8f \n",atrenergy);
    // exit(1);
    return atrenergy;
}

/*..............................................................................*/

/*
 * Determines total energy of spherocylinder type 1 and sphere type 11
 */
double e_spa_sca(struct interacts * interact)
{
    double atrenergy, repenergy, b, f0, halfl;
    struct vector vec_perpproject(struct vector *, struct vector *);
    void normalise(struct vector *);
    void closestdist(struct interacts *);
    double erepulsive(struct interacts *);
    double fanglscale(double, struct ia_param *, int which);

    //DEBUG printf ("do energy 111 \n\n");
    closestdist(interact);
    repenergy = erepulsive(interact);
    if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) )
        atrenergy = 0.0;
    else {
        /*calculate
closest distance attractive energy*/
        if (interact->dist < interact->param->pdis)
            atrenergy = -interact->param->epsilon;
        else {
            atrenergy = cos(PIH*(interact->dist-interact->param->pdis)/interact->param->pswitch);
            atrenergy *= -atrenergy*interact->param->epsilon ;
        }
        /*scaling function for the length of spherocylinder within cutoff*/
        if (interact->param->geotype [0] < SP)
            halfl = interact->param->half_len[0];
        else
            halfl = interact->param->half_len[1];
        /* chord half-length of the cutoff sphere at the closest-approach distance */
        b = sqrt(interact->param->rcut*interact->param->rcut-interact->dist*interact->dist);
        if ( interact->contt + b > halfl )
            f0 = halfl;
        else
            f0 = interact->contt + b;
        if ( interact->contt - b < -halfl )
            f0 -= -halfl;
        else
            f0 -= interact->contt - b;
        atrenergy *= f0;
        //if (atrenergy < 0) printf ("atraction %f\n",atrenergy);
        //fprintf (stderr, "attraction211 %.8f x: %.8f y: %.8f z: %.8f \n",atrenergy,vec1.x,vec1.y,vec1.z);
        //exit(1);
    }
    return repenergy+atrenergy;
}

/*..............................................................................*/

/*
 * Determines total energy of spherocylinder type 2 and sphere type 11
 */
double e_psc_spa(struct interacts * interact)
{
    double atrenergy, repenergy;
    void closestdist(struct interacts *);
    double erepulsive(struct interacts *);
    double eattractive_psc_spa(struct interacts *, int);

    //DEBUG_SIM("do energy 211") ;
    closestdist(interact);
    repenergy = erepulsive(interact);
    //DEBUG_SIM("got the rep. energy");
    if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) )
        atrenergy = 0.0;
    else {
        BOOL firstCH=FALSE, secondCH=FALSE;
        struct vector olddir1 = interact->part1->dir;
        struct vector olddir2 = interact->part2->dir;
        /* chiral geotypes: evaluate with the particle axis rotated onto chdir[0] */
        if ( (interact->param->geotype[0] == CHPSC) || (interact->param->geotype[0] == TCHPSC) )
            firstCH = TRUE;
        if ( (interact->param->geotype[1] == CHPSC) || (interact->param->geotype[1] == TCHPSC) )
            secondCH = TRUE;
        if(firstCH) interact->part1->dir = interact->part1->chdir[0];
        if(secondCH) interact->part2->dir = interact->part2->chdir[0];
        if ((firstCH) || (secondCH) ) {
            closestdist(interact);
        }
        atrenergy = eattractive_psc_spa(interact,0);
        /*addition of interaction of second patches*/
        if ( (interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) ||
             (interact->param->geotype[1] == TPSC) ||(interact->param->geotype[1] == TCHPSC) ) {
            BOOL firstT=FALSE, secondT=FALSE;
            if ( (interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) )
                firstT = TRUE;
            if ( (interact->param->geotype[1] == TPSC) ||(interact->param->geotype[1] == TCHPSC) )
                secondT = TRUE;
            if (firstT) {
                if (firstCH) {
                    interact->part1->dir = interact->part1->chdir[1];
                    closestdist(interact);
                }
                atrenergy += eattractive_psc_spa(interact,1);
            }
            if (secondT) {
                if(secondCH) {
                    interact->part2->dir = interact->part2->chdir[1];
                    closestdist(interact);
                }
                atrenergy += eattractive_psc_spa(interact,1);
            }
            /* two spherocylinders must never be routed to this SC-sphere routine */
            if ( (firstT) && (secondT) ) {
                fprintf (stderr, "ERROR PSC should interact s SPA but got two PSC \n");
                exit(1);
            }
        }
        /* restore the original particle directions */
        if (firstCH) interact->part1->dir = olddir1;
        if (secondCH) interact->part2->dir = olddir2;
    }
    return repenergy+atrenergy;
}

/*
 * Determines attractive energy of spherocylinder type 2 and sphere type 11
 */
double eattractive_psc_spa(struct interacts * interact, int patchnum1)
{
    double atrenergy, a, b, f0, halfl;
    struct vector vec1;
    struct vector vec_perpproject(struct vector *, struct vector*);
    void normalise(struct vector *);
double fanglscale(double, struct ia_param *, int which); int which; /*calculate closest distance attractive energy*/ if (interact->dist < interact->param->pdis) atrenergy = -interact->param->epsilon; else { atrenergy = cos(PIH*(interact->dist-interact->param->pdis)/interact->param->pswitch); atrenergy *= -atrenergy*interact->param->epsilon ; } /*scaling function: angular dependence of patch1*/ if (interact->param->geotype[0] < SP) { which = 0; vec1=vec_perpproject(&interact->distvec, &interact->part1->dir); normalise(&vec1); a = DOT(vec1,interact->part1->patchdir[patchnum1]); halfl=interact->param->half_len[0]; } else { which = 1; vec1=vec_perpproject(&interact->distvec, &interact->part2->dir); normalise(&vec1); a = DOT(vec1,interact->part2->patchdir[patchnum1]); halfl=interact->param->half_len[1]; } /*scaling function for the length of spherocylinder within cutoff*/ b = sqrt(interact->param->rcut*interact->param->rcut-interact->dist*interact->dist); if ( interact->contt + b > halfl ) f0 = halfl; else f0 = interact->contt + b; if ( interact->contt - b < -halfl ) f0 -= -halfl; else f0 -= interact->contt - b; atrenergy *= fanglscale(a,interact->param, which)*f0; //if (atrenergy < 0) printf ("atraction %f\n",atrenergy); //fprintf (stderr, "attraction211 %.8f x: %.8f y: %.8f z: %.8f \n",atrenergy,vec1.x,vec1.y,vec1.z); //exit(1); return atrenergy; } /*..............................................................................*/ /* Determines total energy of spherocylinder type 3 and sphere type 11 */ double e_cpsc_spa(struct interacts * interact) { double atrenergy, repenergy, halfl; void closestdist(struct interacts *); double erepulsive(struct interacts *); double eattractive_cpsc_spa(struct interacts *,int); //DEBUG_SIM("do energy 311") ; closestdist(interact); repenergy = erepulsive(interact); //DEBUG_SIM("got the rep. 
energy"); if (interact->param->geotype[0] < SP) { halfl=interact->param->half_len[0]; } else { halfl=interact->param->half_len[1]; } if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) || ( interact->dist > interact->param->rcut ) || (interact->contt > halfl) || (interact->contt < -halfl) ) atrenergy = 0.0; else { BOOL firstCH=FALSE, secondCH=FALSE; struct vector olddir1 = interact->part1->dir; struct vector olddir2 = interact->part2->dir; if ( (interact->param->geotype[0] == CHCPSC) || (interact->param->geotype[0] == TCHCPSC) ) firstCH = TRUE; if ( (interact->param->geotype[1] == CHCPSC) || (interact->param->geotype[1] == TCHCPSC) ) secondCH = TRUE; if(firstCH) interact->part1->dir = interact->part1->chdir[0]; if(secondCH) interact->part2->dir = interact->part2->chdir[0]; if ((firstCH) || (secondCH) ) { closestdist(interact); } atrenergy = eattractive_cpsc_spa(interact,0); /*addition of interaction of second patches*/ if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) || (interact->param->geotype[1] == TCPSC) ||(interact->param->geotype[1] == TCHCPSC) ) { BOOL firstT=FALSE, secondT=FALSE; if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) ) firstT = TRUE; if ( (interact->param->geotype[1] == TCPSC) ||(interact->param->geotype[1] == TCHCPSC) ) secondT = TRUE; if (firstT) { if (firstCH) { interact->part1->dir = interact->part1->chdir[1]; closestdist(interact); } atrenergy += eattractive_cpsc_cpsc(interact,1,0); } if (secondT) { if(secondCH) { interact->part2->dir = interact->part2->chdir[1]; closestdist(interact); } atrenergy += eattractive_cpsc_cpsc(interact,0,1); } if ( (firstT) && (secondT) ) { fprintf (stderr, "ERROR PSC should interact s SPA but got two PSC \n"); exit(1); } } if (firstCH) interact->part1->dir = olddir1; if (secondCH) interact->part2->dir = olddir2; } return repenergy+atrenergy; } /* Determines attractive energy of spherocylinder type 3 
and sphere type 11 */
double eattractive_cpsc_spa(struct interacts * interact,int patchnum1)
{
    double atrenergy, a, b, f0, halfl;
    struct vector vec1;
    int which;
    struct vector vec_perpproject(struct vector *, struct vector*);
    void normalise(struct vector *);
    double fanglscale(double, struct ia_param *, int which);

    /*if it is in cylindrical part c>-halfl and c<halfl*/
    /*calculate closest distance attractive energy*/
    if (interact->dist < interact->param->pdis)
        atrenergy = -interact->param->epsilon;
    else {
        atrenergy = cos(PIH*(interact->dist-interact->param->pdis)/interact->param->pswitch);
        atrenergy *= -atrenergy*interact->param->epsilon ;
    }
    /*scaling function: angular dependence of patch1*/
    /* pick out which of the pair is the spherocylinder (geotype < SP) */
    if (interact->param->geotype[0] < SP) {
        which = 0;
        vec1=vec_perpproject(&interact->distvec, &interact->part1->dir);
        normalise(&vec1);
        a = DOT(vec1,interact->part1->patchdir[patchnum1]);
        halfl = interact->param->half_len[0];
    } else {
        which = 1;
        vec1=vec_perpproject(&interact->distvec, &interact->part2->dir);
        normalise(&vec1);
        a = DOT(vec1,interact->part2->patchdir[patchnum1]);
        halfl = interact->param->half_len[1];
    }
    /*scaling function for the length of spherocylinder within cutoff*/
    b = sqrt(interact->param->rcut*interact->param->rcut-interact->dist*interact->dist);
    if ( interact->contt + b > halfl )
        f0 = halfl;
    else
        f0 = interact->contt + b;
    if ( interact->contt - b < -halfl )
        f0 -= -halfl;
    else
        f0 -= interact->contt - b;
    atrenergy *= fanglscale(a,interact->param, which)*f0;
    //if (atrenergy < 0) printf ("atraction %f\n",atrenergy);
    //fprintf (stderr, "attraction311 %.8f a: %.8f\n",atrenergy,a);
    //exit(1);
    return atrenergy;
}

/*..............................................................................*/

/* Determines total energy of two spherocylinders type 11 */
double e_2sca_or_2spa(struct interacts * interact)
{
    double repenergy, atrenergy;
    double erepulsive(struct interacts *);
    void closestdist(struct interacts *);

    closestdist(interact);
    repenergy = erepulsive(interact);
    if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) )
        atrenergy = 0.0;
    else {
        /* simple isotropic attraction with the cosine-squared switch */
        if (interact->dist < interact->param->pdis)
            atrenergy = -interact->param->epsilon;
        else {
            atrenergy = cos(PIH*(interact->dist-interact->param->pdis)/interact->param->pswitch);
            atrenergy *= -atrenergy*interact->param->epsilon ;
        }
    }
    return repenergy+atrenergy;
}

/*..............................................................................*/

/* Determines total energy with purely repulsive types */
double e_spn_or_scn(struct interacts * interact)
{
    double repenergy;
    double erepulsive(struct interacts *);
    void closestdist(struct interacts *);

    closestdist(interact);
    repenergy = erepulsive(interact);
    return repenergy;
}

/*..............................................................................*/

/* Determines repulsive energy of two spherocylinders */
double erepulsive(struct interacts * interact)
{
    double repenergy, en6;

    /* WCA repulsion: zero beyond rcutwca, shifted Lennard-Jones inside */
    if (interact->dist > interact->param->rcutwca)
        repenergy = 0.0;
    else {
        en6 = pow((interact->param->sigma/interact->dist),6);
        repenergy = 4*en6*(en6-1) + 1.0;
    }
    //printf("repenergy: %f dist: %f\n",repenergy, interact->dist);
    return repenergy;
}

/*..............................................................................*/

/* Indicates not yet programmed interaction */
double enoexist(struct interacts * interact)
{
    double energy=0.0;

    fprintf (stderr, "ERROR: We have not programed interaction of types %d and %d\n",
             interact->part1->type,interact->part2->type);
    exit (1);
    return energy;
}

/* function for calculation of harmonic potential*/
double harmonic(double aktualvalue, double eqvalue, double springconst)
{
    return springconst*(aktualvalue-eqvalue)*(aktualvalue-eqvalue)*0.5;
}

/*..............................................................................*/

/* Determines bond energy */
double bondenergy(long num1, long num2, struct interacts * interact, struct topo * topo, struct conf * conf)
{
    double energy=0.0,
bondlength, halfl;
    struct vector vec1, vec2, vecbond;
    int * geotype = interact->param->geotype;
    struct vector image(struct vector, struct vector, struct vector);
    double harmonic(double, double, double);

    /*interaction with nearest neighbours -harmonic*/
    if ((topo->chainparam[conf->particle[num1].chaint]).bond1c >= 0) {
        if (num2 == topo->conlist[num1][1]) {
            /*num1 is connected to num2 by tail*/
            if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
                energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bond1eq,topo->chainparam[conf->particle[num1].chaint].bond1c);
            else {
                /* spherocylinders bond at their ends: shift each endpoint by
                   half_len along the particle axis (scaled by the box) */
                if (geotype[0] < SP)
                    halfl=interact->param->half_len[0];
                else
                    halfl = 0.0;
                vec1.x=conf->particle[num1].pos.x - conf->particle[num1].dir.x * halfl /conf->box.x;
                vec1.y=conf->particle[num1].pos.y - conf->particle[num1].dir.y * halfl /conf->box.y;
                vec1.z=conf->particle[num1].pos.z - conf->particle[num1].dir.z * halfl /conf->box.z;
                if (geotype[1] < SP)
                    halfl=interact->param->half_len[1];
                else
                    halfl = 0.0;
                vec2.x=conf->particle[num2].pos.x + conf->particle[num2].dir.x * halfl /conf->box.x;
                vec2.y=conf->particle[num2].pos.y + conf->particle[num2].dir.y * halfl /conf->box.y;
                vec2.z=conf->particle[num2].pos.z + conf->particle[num2].dir.z * halfl /conf->box.z;
                vecbond = image(vec1, vec2, conf->box);
                bondlength = sqrt(DOT(vecbond,vecbond));
                energy = harmonic(bondlength,topo->chainparam[conf->particle[num1].chaint].bond1eq,topo->chainparam[conf->particle[num1].chaint].bond1c);
            }
        } else {
            if (num2 == topo->conlist[num1][0]) {
                /*num1 is connected to num2 by head*/
                if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
                    energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bond1eq,topo->chainparam[conf->particle[num1].chaint].bond1c);
                else {
                    if (geotype[0] < SP)
                        halfl=interact->param->half_len[0];
                    else
                        halfl = 0.0;
                    vec1.x=conf->particle[num1].pos.x + conf->particle[num1].dir.x * halfl /conf->box.x;
                    vec1.y=conf->particle[num1].pos.y + conf->particle[num1].dir.y * halfl /conf->box.y;
                    vec1.z=conf->particle[num1].pos.z + conf->particle[num1].dir.z * halfl /conf->box.z;
                    /* NOTE(review): this tests geotype[0]/half_len[0] but is applied
                       to particle num2 — the tail branch above uses geotype[1]/
                       half_len[1] at this point; looks like a copy-paste slip, confirm */
                    if (geotype[0] < SP)
                        halfl=interact->param->half_len[0];
                    else
                        halfl = 0.0;
                    vec2.x=conf->particle[num2].pos.x - conf->particle[num2].dir.x * halfl /conf->box.x;
                    vec2.y=conf->particle[num2].pos.y - conf->particle[num2].dir.y * halfl /conf->box.y;
                    vec2.z=conf->particle[num2].pos.z - conf->particle[num2].dir.z * halfl /conf->box.z;
                    vecbond = image(vec1, vec2, conf->box);
                    bondlength = sqrt(DOT(vecbond,vecbond));
                    energy = harmonic(bondlength,topo->chainparam[conf->particle[num1].chaint].bond1eq,topo->chainparam[conf->particle[num1].chaint].bond1c);
                }
            }
        }
    }
    /*interaction with second nearest neighbours -harmonic*/
    if (topo->chainparam[conf->particle[num1].chaint].bond2c >= 0) {
        if (num2 == topo->conlist[num1][2]) {
            /*num1 is connected to num2 by tail*/
            if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
                energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bond2eq,topo->chainparam[conf->particle[num1].chaint].bond2c);
            else {
                /* second neighbours bond centre-to-centre */
                vecbond = image(conf->particle[num1].pos, conf->particle[num2].pos, conf->box);
                bondlength = sqrt(DOT(vecbond,vecbond));
                energy = harmonic(bondlength,topo->chainparam[conf->particle[num1].chaint].bond2eq,topo->chainparam[conf->particle[num1].chaint].bond2c);
            }
        } else {
            if (num2 == topo->conlist[num1][3]) {
                /*num1 is connected to num2 by head*/
                if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
                    energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bond2eq,topo->chainparam[conf->particle[num1].chaint].bond2c);
                else {
                    vecbond = image(conf->particle[num1].pos, conf->particle[num2].pos, conf->box);
                    bondlength = sqrt(DOT(vecbond,vecbond));
                    energy = harmonic(bondlength,topo->chainparam[conf->particle[num1].chaint].bond2eq,topo->chainparam[conf->particle[num1].chaint].bond2c);
                }
            }
        }
    }
    /*interaction with nearest neighbours - direct harmonic bond*/
    if ((topo->chainparam[conf->particle[num1].chaint]).bonddc > 0) {
        if
(num2 == topo->conlist[num1][1]) {
            /*num1 is connected to num2 by tail*/
            if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
                energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bonddeq,topo->chainparam[conf->particle[num1].chaint].bonddc);
            else {
                if (geotype[0] < SP)
                    halfl=interact->param->half_len[0];
                else
                    halfl = 0.0;
                vec1.x=conf->particle[num1].pos.x - conf->particle[num1].dir.x * halfl /conf->box.x;
                vec1.y=conf->particle[num1].pos.y - conf->particle[num1].dir.y * halfl /conf->box.y;
                vec1.z=conf->particle[num1].pos.z - conf->particle[num1].dir.z * halfl /conf->box.z;
                if (geotype[1] < SP)
                    halfl=interact->param->half_len[1];
                else
                    halfl = 0.0;
                /* the equilibrium distance bonddeq is built into the endpoint
                   shift, so the harmonic below is taken around zero */
                vec2.x=conf->particle[num2].pos.x + conf->particle[num2].dir.x * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.x ;
                vec2.y=conf->particle[num2].pos.y + conf->particle[num2].dir.y * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.y ;
                vec2.z=conf->particle[num2].pos.z + conf->particle[num2].dir.z * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.z ;
                vecbond = image(vec1, vec2, conf->box);
                bondlength = sqrt(DOT(vecbond,vecbond));
                energy = harmonic(bondlength,0.0,topo->chainparam[conf->particle[num1].chaint].bonddc);
            }
        } else {
            if (num2 == topo->conlist[num1][0]) {
                /*num1 is connected to num2 by head*/
                /* NOTE(review): this sphere-sphere case uses bond1eq/bond1c while
                   the tail branch uses bonddeq/bonddc — probably a copy-paste
                   slip; confirm intended constants */
                if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
                    energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bond1eq,topo->chainparam[conf->particle[num1].chaint].bond1c);
                else {
                    if (geotype[0] < SP)
                        halfl=interact->param->half_len[0];
                    else
                        halfl = 0.0;
                    vec1.x=conf->particle[num1].pos.x + conf->particle[num1].dir.x * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.x ;
                    vec1.y=conf->particle[num1].pos.y + conf->particle[num1].dir.y * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.y ;
                    vec1.z=conf->particle[num1].pos.z + conf->particle[num1].dir.z * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.z ;
                    /* NOTE(review): geotype[0]/half_len[0] applied to particle num2
                       (the tail branch uses geotype[1]/half_len[1] here); confirm */
                    if (geotype[0] < SP)
                        halfl=interact->param->half_len[0];
                    else
                        halfl = 0.0;
                    vec2.x=conf->particle[num2].pos.x - conf->particle[num2].dir.x * halfl /conf->box.x;
                    vec2.y=conf->particle[num2].pos.y - conf->particle[num2].dir.y * halfl /conf->box.y;
                    vec2.z=conf->particle[num2].pos.z - conf->particle[num2].dir.z * halfl /conf->box.z;
                    vecbond = image(vec1, vec2, conf->box);
                    bondlength = sqrt(DOT(vecbond,vecbond));
                    energy = harmonic(bondlength,0.0,topo->chainparam[conf->particle[num1].chaint].bonddc);
                }
            }
        }
    }
    //printf("bondlength: %f\n",bondlength);
    // printf("bondener: %f\n",energy);
    return energy;
}

/*..............................................................................*/

/* Determines angle energy between spherocylinders */
double angleenergy(long num1, long num2, struct interacts * interact, struct topo * topo, struct conf * conf)
{
    double energy=0.0, currangle, halfl;
    struct vector vec1, vec2;
    int * geotype = interact->param->geotype;
    struct vector image(struct vector, struct vector, struct vector);
    void normalise(struct vector *);
    double harmonic(double, double, double);

    /*angle interaction with nearest neighbours -harmonic*/
    if ((topo->chainparam[conf->particle[num1].chaint]).angle1c >= 0) {
        if (num2 == topo->conlist[num1][0]) {
            /*num1 is connected to num2 by tail*/
            if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
                /*spheres do not have this interaction*/
                energy += 0.0;
            else {
                if (geotype[0] < SP)
                    vec1 = conf->particle[num1].dir;
                else {
                    halfl=interact->param->half_len[1];
                    //sphere angle is defined versus the end of spherocylinder
                    vec1.x=conf->particle[num2].pos.x - conf->particle[num2].dir.x * halfl /conf->box.x;
                    vec1.y=conf->particle[num2].pos.y - conf->particle[num2].dir.y * halfl /conf->box.y;
                    vec1.z=conf->particle[num2].pos.z - conf->particle[num2].dir.z * halfl /conf->box.z;
                    vec1 = image(vec1, conf->particle[num1].pos, conf->box);
                }
                if (geotype[1] < SP)
                    vec2 =
conf->particle[num2].dir;
                else {
                    halfl=interact->param->half_len[0];
                    vec2.x=conf->particle[num1].pos.x + conf->particle[num1].dir.x * halfl /conf->box.x;
                    vec2.y=conf->particle[num1].pos.y + conf->particle[num1].dir.y * halfl /conf->box.y;
                    vec2.z=conf->particle[num1].pos.z + conf->particle[num1].dir.z * halfl /conf->box.z;
                    vec2 = image(vec2, conf->particle[num2].pos, conf->box);
                }
                normalise(&vec1);
                normalise(&vec2);
                currangle = acos(DOT(vec1,vec2));
                energy += harmonic(currangle,topo->chainparam[conf->particle[num1].chaint].angle1eq,topo->chainparam[conf->particle[num1].chaint].angle1c);
            }
        } else {
            if (num2 == topo->conlist[num1][1]) {
                /*num1 is connected to num2 by head*/
                if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
                    /*spheres do not have this interaction*/
                    energy += 0.0;
                else {
                    if (geotype[0] < SP)
                        vec1 = conf->particle[num1].dir;
                    else {
                        halfl=interact->param->half_len[1];
                        //sphere angle is defined versus the end of spherocylinder
                        vec1.x=conf->particle[num2].pos.x + conf->particle[num2].dir.x * halfl /conf->box.x;
                        vec1.y=conf->particle[num2].pos.y + conf->particle[num2].dir.y * halfl /conf->box.y;
                        vec1.z=conf->particle[num2].pos.z + conf->particle[num2].dir.z * halfl /conf->box.z;
                        vec1 = image(vec1, conf->particle[num1].pos, conf->box);
                    }
                    if (geotype[1] < SP)
                        vec2 = conf->particle[num2].dir;
                    else {
                        halfl=interact->param->half_len[0];
                        vec2.x=conf->particle[num1].pos.x - conf->particle[num1].dir.x * halfl /conf->box.x;
                        vec2.y=conf->particle[num1].pos.y - conf->particle[num1].dir.y * halfl /conf->box.y;
                        vec2.z=conf->particle[num1].pos.z - conf->particle[num1].dir.z * halfl /conf->box.z;
                        vec2 = image(vec2, conf->particle[num2].pos, conf->box);
                    }
                    normalise(&vec1);
                    normalise(&vec2);
                    currangle = acos(DOT(vec1,vec2));
                    energy += harmonic(currangle,topo->chainparam[conf->particle[num2].chaint].angle1eq,topo->chainparam[conf->particle[num2].chaint].angle1c);
                }
            }
        }
    }
    /*interaction between the orientation of spherocylinders patches -harmonic*/
    if (topo->chainparam[conf->particle[num1].chaint].angle2c >= 0) {
        if (num2 == topo->conlist[num1][0]) {
            /*num1 is connected to num2 by tail*/
            if ( (geotype[0] < SP) && (geotype[1] < SP) ) {
                currangle = acos(DOT(conf->particle[num1].patchdir[0],conf->particle[num2].patchdir[0]) - DOT(conf->particle[num1].dir,conf->particle[num2].patchdir[0]) );
                energy += harmonic(currangle,topo->chainparam[conf->particle[num1].chaint].angle2eq,topo->chainparam[conf->particle[num1].chaint].angle2c);
            } else {
                energy += 0.0;
            }
        } else {
            if (num2 == topo->conlist[num1][1]) {
                /*num1 is connected to num2 by head*/
                if ( (geotype[0] < SP) && (geotype[1] < SP) ) {
                    currangle = acos(DOT(conf->particle[num2].patchdir[0],conf->particle[num1].patchdir[0]) - DOT(conf->particle[num2].dir,conf->particle[num1].patchdir[0]) );
                    energy += harmonic(currangle,topo->chainparam[conf->particle[num2].chaint].angle2eq,topo->chainparam[conf->particle[num2].chaint].angle2c);
                } else {
                    energy += 0.0;
                }
            }
        }
    }
    // printf("angleener: %f\n",energy);
    return energy;
}

/* cluses distance calculation*/
void closestdist(struct interacts * interact)
{
    double c, d, halfl;
    struct vector mindist_segments(struct vector dir1, double halfl1, struct vector dir2, double halfl2, struct vector r_cm);
    double linemin(double, double);

    //printf("we have %d %d ",interact->param->geotype[0],interact->param->geotype[1] );
    if ((interact->param->geotype[0] >= SP) && (interact->param->geotype[1] >= SP)) {
        /*we have two spheres - most common, do nothing*/
        //printf("we have two spheres ");
        interact->distvec = interact->r_cm;
        interact->dist = sqrt(interact->dotrcm);
        interact->distcm = interact->dist;
    } else {
        if ((interact->param->geotype[0] < SP) && (interact->param->geotype[1] < SP)) {
            /*we have two spherocylinders*/
            interact->distvec = mindist_segments(interact->part1->dir,interact->param->half_len[0], interact->part2->dir, interact->param->half_len[1], interact->r_cm);
            interact->dist=sqrt(DOT(interact->distvec,interact->distvec));
        } else {
            if
(interact->param->geotype[0] < SP) {
                /*We have one spherocylinder -it is first one*/
                halfl=interact->param->half_len[0];
                /*finding closest vector from sphyrocylinder to sphere*/
                c = DOT(interact->part1->dir,interact->r_cm);
                /* clamp the contact parameter to the cylinder half-length */
                if (c >= halfl)
                    d = halfl;
                else {
                    if (c > -halfl)
                        d = c;
                    else
                        d = -halfl;
                }
                interact->contt = c;
                interact->distvec.x = - interact->r_cm.x + interact->part1->dir.x * d;
                interact->distvec.y = - interact->r_cm.y + interact->part1->dir.y * d;
                interact->distvec.z = - interact->r_cm.z + interact->part1->dir.z * d;
                interact->dist=sqrt(DOT(interact->distvec,interact->distvec));
            } else {
                /*lst option first one is sphere second one spherocylinder*/
                halfl=interact->param->half_len[1];
                /*finding closest vector from sphyrocylinder to sphere*/
                c = DOT(interact->part2->dir,interact->r_cm);
                if (c >= halfl)
                    d = halfl;
                else {
                    if (c > -halfl)
                        d = c;
                    else
                        d = -halfl;
                }
                interact->contt = -c;
                interact->distvec.x = interact->r_cm.x - interact->part2->dir.x * d;
                interact->distvec.y = interact->r_cm.y - interact->part2->dir.y * d;
                interact->distvec.z = interact->r_cm.z - interact->part2->dir.z * d;
                interact->dist=sqrt(DOT(interact->distvec,interact->distvec));
            }
        }
    }
}

/*..............................................................................*/

/* Determines energy of two particles */
double paire(long num1, long num2, double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo, struct conf * conf)
{
    double energy=0.0; /* energy*/
    struct vector r_cm; /* Vector between centres of mass from part2 to part1*/
    struct interacts interact; /*interaction parameters*/
    double bondenergy(long, long, struct interacts *, struct topo * topo, struct conf * conf);
    double angleenergy(long, long, struct interacts *, struct topo * topo, struct conf * conf);

    /*Placing interactin particle in unit box and finding vector connecting CM*/
    /*r_cm = image(part1.pos, part2.pos, box); explicit statement below for performance optimization*/
    r_cm.x = conf->particle[num1].pos.x - conf->particle[num2].pos.x;
    r_cm.y = conf->particle[num1].pos.y - conf->particle[num2].pos.y;
    r_cm.z = conf->particle[num1].pos.z - conf->particle[num2].pos.z;
    /* nearest-image wrap, branch per sign to round towards the closest image */
    if ( r_cm.x < 0 )
        r_cm.x = conf->box.x * (r_cm.x - (double)( (long)(r_cm.x-0.5) ) );
    else
        r_cm.x = conf->box.x * (r_cm.x - (double)( (long)(r_cm.x+0.5) ) );
    if ( r_cm.y < 0 )
        r_cm.y = conf->box.y * (r_cm.y - (double)( (long)(r_cm.y-0.5) ) );
    else
        r_cm.y = conf->box.y * (r_cm.y - (double)( (long)(r_cm.y+0.5) ) );
    if ( r_cm.z < 0 )
        r_cm.z = conf->box.z * (r_cm.z - (double)( (long)(r_cm.z-0.5) ) );
    else
        r_cm.z = conf->box.z * (r_cm.z - (double)( (long)(r_cm.z+0.5) ) );
    interact.dotrcm = DOT(r_cm,r_cm);
    if ( interact.dotrcm > topo->sqmaxcut) return 0.0; /* distance so far that even spherocylinders cannot be within cutoff */
    interact.r_cm=r_cm;
    interact.contt = 0;
    interact.distvec.x = 0;
    interact.distvec.y = 0;
    interact.distvec.z = 0;
    interact.box = conf->box;
    interact.part1 = &conf->particle[num1];
    interact.part2 = &conf->particle[num2];
    /* NOTE(review): pointer arithmetic ia_params[type1] + type2 — presumably
       equivalent to &ia_params[type1][type2]; confirm the array layout */
    interact.param = topo->ia_params[conf->particle[num1].type] + conf->particle[num2].type;
    if(intfce[conf->particle[num1].type][conf->particle[num2].type] == NULL){
        fprintf(stderr, "interaction function for type %d and %d not defined!\n",
                conf->particle[num1].type, conf->particle[num2].type);
    }
    energy = (*intfce[conf->particle[num1].type][conf->particle[num2].type])( &interact);
    //printf("num: %ld %ld e: %f dist: %f",num1,num2,energy,interact.dist);
    energy += bondenergy ( num1, num2, &interact, topo, conf);
    energy += angleenergy ( num1, num2, &interact, topo, conf);
    //printf(" e: %f\n",energy);
    return energy;
}

/*...........................................................................*/

/*Calculates interaction of target particle and external field
  version 2 calculate projection of spherocylinder in direction of patch and
  calculate interacting line segment within cutoff */
double extere2 (long target, struct topo * topo, struct conf * conf)
{
    double repenergy=0.0,atrenergy=0.0;
/* energy*/
    double rcmz;              /* z distance between*/
    double ndist;             /* distance for CM of interacting line segment*/
    double interendz;         /* z coordinate of interaction end*/
    struct interacts interact; /* interaction parameters*/
    double orient;
    double halfl;
    BOOL positive, orientin;
    struct vector olddir;
    struct vector project;    /*vector for projection down to plane */
    double erepulsive(struct interacts *);
    // struct vector vec_perpproject(struct vector*, struct vector*);
    // void normalise(struct vector *);
    double fanglscale(double, struct ia_param *, int which);
    void exter2_closestdist(struct interacts * interact, BOOL *positive, BOOL *orientin, double *orient, double *rcmz,double *interendz, struct vector *project);
    double exter2_atre(struct interacts * interact,int *orientin, double *rcmz, double *interendz, BOOL *positive, double orient,struct vector *project, double *ndist,int, double );

    /* calcualte distance to center of mass*/
    /* fold z coordinate back into the primary box, scaled to real length */
    if ( conf->particle[target].pos.z < 0 ) {
        rcmz = conf->box.z * (conf->particle[target].pos.z - (double)( (long)(conf->particle[target].pos.z - 0.5) ) );
    } else {
        rcmz = conf->box.z * (conf->particle[target].pos.z - (double)( (long)(conf->particle[target].pos.z + 0.5) ) );
    }
    project.x=0;
    project.y=0;
    if (rcmz < 0) {
        interact.dist = -rcmz;
        positive = FALSE;
        interendz = -1.0;
        project.z = 1.0;
    } else {
        interact.dist = rcmz;
        positive = TRUE;
        interendz = 1.0;
        project.z = -1.0;
    }
    interact.dotrcm = rcmz * rcmz;
    if ( interact.dotrcm > topo->exter.sqmaxcut) return 0.0;  /* distance so far that even spherocylinders cannot be within cutoff */

    /* NOTE(review): interact.r_cm is never assigned in this function, so the
       read below uses an uninitialized member -- presumably rcmz was meant;
       verify against the rest of the code base before changing. */
    interact.distvec.z = interact.r_cm.z;
    interact.distcm = interact.dist;
    interact.box = conf->box;
    interact.part1 = &conf->particle[target];
    interact.param = &topo->exter.interactions[conf->particle[target].type];
    halfl = 0.5* topo->exter.interactions[conf->particle[target].type].len[0];
    ndist = interact.dist;
    orientin = TRUE;
    orient = 0.0;
    exter2_closestdist(&interact,&positive,&orientin,&orient,&rcmz,&interendz,&project);
    /* now we have closest distance so we can calculate repulsion*/
    repenergy = erepulsive(&interact);
    //printf("dist: %f",interact.dist);

    /*save chiral stuff*/
    olddir = interact.part1->dir;
    if ((interact.param->geotype[0] == CHCPSC)||(interact.param->geotype[0] == CHPSC)) {
        /* chiral particle: use the rotated (chiral) direction for the patch */
        interact.part1->dir = interact.part1->chdir[0];
        exter2_closestdist(&interact,&positive,&orientin,&orient,&rcmz,&interendz,&project);
    }
    /* patch 0 attraction: skipped when out of cutoff, epsilon==0, or the patch
       points away from the wall */
    if (( interact.dist > interact.param->rcut ) || (interact.param->epsilon == 0.0 ) || ( (interact.part1->patchdir[0].z >0)&&(positive) ) || ( (interact.part1->patchdir[0].z <0)&&(!(positive)) ) )
        atrenergy = 0.0;
    else {
        atrenergy = exter2_atre(&interact,&orientin,&rcmz,&interendz,&positive,orient,&project,&ndist,0,halfl);
    }
    /* two-patch (T*) geometries: repeat for patch 1 */
    if ((interact.param->geotype[0] == TCPSC)||(interact.param->geotype[0] == TPSC)|| (interact.param->geotype[0] == TCHCPSC)||(interact.param->geotype[0] == TCHPSC)) {
        if ((interact.param->geotype[0] == TCHCPSC)||(interact.param->geotype[0] == TCHPSC)) {
            interact.part1->dir = interact.part1->chdir[1];
            exter2_closestdist(&interact,&positive,&orientin,&orient,&rcmz,&interendz,&project);
        }
        /* NOTE(review): exter2_closestdist is called again unconditionally here,
           so for chiral T* types it runs twice in a row -- looks redundant; confirm intent. */
        exter2_closestdist(&interact,&positive,&orientin,&orient,&rcmz,&interendz,&project);
        if (( interact.dist > interact.param->rcut ) || (interact.param->epsilon == 0.0 ) || ( (interact.part1->patchdir[1].z >0)&&(positive) ) || ( (interact.part1->patchdir[1].z <0)&&(!(positive)) ) )
            atrenergy += 0.0;
        else {
            atrenergy += exter2_atre(&interact,&orientin,&rcmz,&interendz,&positive,orient,&project,&ndist,1,halfl);
        }
    }
    /* restore the direction we overwrote for chiral types */
    if ((interact.param->geotype[0] == CHCPSC)||(interact.param->geotype[0] == CHPSC)|| (interact.param->geotype[0] == TCHCPSC)||(interact.param->geotype[0] == TCHPSC) ) {
        interact.part1->dir = olddir;
    }
    //printf("%f %f \n",conf->particle[target].pos.z*conf->box.z,repenergy+atrenergy);
    return repenergy+atrenergy;
}

double exter2_atre(struct interacts * interact,int *orientin, double
*rcmz, double *interendz, BOOL *positive, double orient,struct vector *project, double *ndist,int numpatch, double halfl)
{
    /* Attractive part of the wall interaction for one patch (numpatch):
       projects the interacting line segment of the spherocylinder onto the
       wall and scales the attraction by segment lengths and patch angle. */
    struct vector pbeg,pend;      /* projected spherocylinder begining and end*/
    double a,length1,length2, f0,f1;
    struct vector cm1,cm2;        /* centrar of interacting segments */
    int line;
    struct vector partbeg,partend; /*closest and furthest point of particle*/
    struct vector inters;
    double atrenergy=0.0;
    int cpsc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir, struct vector* partdir, double* halfl,BOOL* orientin,BOOL* positive, double* rcmz, double * cut, struct vector* partbeg, struct vector* partend);
    int psc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir, struct vector* partdir, BOOL* positive, double * cut, struct vector* partbeg, struct vector* partend);

    /*interaction with PATCHY SPHEROCYLINDERS*/
    if ((interact->param->geotype[0] < SP)&&(interact->param->geotype[0] > SCA)) {
        //printf("partdir: %f %f %f \n ",interact->part1->dir.x,interact->part1->dir.y,interact->part1->dir.z);
        //printf("patchdir: %f %f %f \n ",interact->part1->patchdir[0].x,interact->part1->patchdir[0].y,interact->part1->patchdir[0].z);
        /* calculate position of closest and furthest point (begining and end of spherocylinder)*/
        a = (*orientin-0.5)*2;  /*if orientin a =1 else a=-1 */
        partbeg.x = a * interact->part1->dir.x * halfl;
        partbeg.y = a * interact->part1->dir.y * halfl;
        partbeg.z = *rcmz + a * interact->part1->dir.z *halfl;
        partend.x = - a * interact->part1->dir.x * halfl;
        partend.y = - a * interact->part1->dir.y * halfl;
        partend.z = *rcmz - a * interact->part1->dir.z * halfl;
        //printf("partbeg %f %f %f partend %f %f %f \n",partbeg.x,partbeg.y,partbeg.z,partend.x,partend.y,partend.z);
        /*calculate interacting line segment and its cm of spherocylinder*/
        /*calculate end point z*/
        if ( (interact->param->rcut - interact->dist)/fabs(interact->part1->dir.z) < 2.0*halfl ){
            /*if cutoff goes through spherocylinder the end point is at cutoff*/
            *interendz *= interact->param->rcut;
        } else {
            /*endpoint is at the end of spherocylinders*/
            *interendz = partend.z;
        }
        /*calculate CM of interacting line segment of spherocylinder*/
        if (*positive) {
            cm1.z = AVER(*interendz,interact->dist);
        } else {
            cm1.z = AVER(*interendz,-interact->dist);
        }
        if (interact->part1->dir.z != 0.0 ) {
            a = (*interendz - cm1.z ) / interact->part1->dir.z;
            length1= -orient*2.0*a;
            a = a + orient*halfl;
        } else {
            a = 0.0;
            length1 = 2.0*halfl;
        }
        //printf("len1: %f rcm %f interz %f cutoff %f \n",length1,rcmz, interendz,interact.dist);
        cm1.x = interact->part1->dir.x * a;
        cm1.y = interact->part1->dir.y * a;

        /* we have interacting segment*/
        if ((interact->param->geotype[0] == CPSC)||(interact->param->geotype[0] == CHCPSC)) {
            /*CPSC type*/
            if ( ((*interendz >= interact->dist)&&(*positive)) || ((*interendz <= -interact->dist)&&(!(*positive))) ){
                /*test if projection is not all out of interaction*/
                line = cpsc_wall(&pbeg,&pend,project,&interact->part1->dir, \
                    &interact->param->half_len[0],orientin,positive,rcmz,&interact->param->rcut,&partbeg,&partend);
                //printf("line: %d beg %f %f end %f %f \n",line,pbeg.x,pbeg.y,pend.x,pend.y);
            } else {
                line = 0;
            }
        } else {
            /*PSC and CHPSC interaction with wall */
            line = psc_wall(&pbeg,&pend,project,&interact->part1->dir, \
                positive,&interact->param->rcut,&partbeg,&partend);
            //printf("line: %d beg %f %f end %f %f \n",line,pbeg.x,pbeg.y,pend.x,pend.y);
        }

        if (line > 0) {
            /*cm2 by average begining and end*/
            cm2.x = AVER(pbeg.x,pend.x);
            cm2.y = AVER(pbeg.y,pend.y);
            cm2.z = 0.0;
            /*length by size of end-benining*/
            length2 = sqrt( (pend.x-pbeg.x)*(pend.x-pbeg.x)+(pend.y-pbeg.y)*(pend.y-pbeg.y) );
            inters.x = cm2.x - cm1.x;
            inters.y = cm2.y - cm1.y;
            inters.z = cm2.z - cm1.z;
            //printf("cm2 %f %f %f inters %f %f %f \n",cm2.x,cm2.y,cm2.z,inters.x,inters.y,inters.z);
            *ndist = sqrt(DOT(inters,inters));
            /* square-well with cosine-squared switch between pdis and pdis+pswitch */
            if (*ndist < interact->param->pdis) {
                atrenergy = -interact->param->epsilon;
            } else {
                atrenergy= cos(PIH*(*ndist-interact->param->pdis)/interact->param->pswitch);
                atrenergy *= -atrenergy*interact->param->epsilon;
            }
            /* scaling function1: dependence on the length of intersetions plus*/
            f0=(length1 + length2)*0.5;
            /*scaling with angle*/
            f1 = fabs(interact->part1->patchdir[numpatch].z);
            atrenergy *= f0*f1;
            //printf(" %f %f %f %f %f %f %f \n",conf->particle[target].pos.z*conf->box.z,atrenergy, area, length1, length2,f0,ndist);
            //printf("%f %f %f %f\n",pbeg.x,pbeg.y,pend.x,pend.y);
        } else {
            atrenergy = 0.0;
        }
    } else {
        /* non-patchy case: plain distance-based attraction */
        if (*ndist < interact->param->pdis)
            atrenergy = -interact->param->epsilon;
        else {
            atrenergy= cos(PIH*(*ndist-interact->param->pdis)/interact->param->pswitch);
            atrenergy *= -atrenergy*interact->param->epsilon;
        }
        /*add wall scaling wall area/ particle arear.. to reflect that we have a wall not sphere */
        atrenergy *= (interact->param->rcut*interact->param->rcut - (*ndist)*(*ndist))/(interact->param->sigma*interact->param->sigma) ;
    }
    return atrenergy;
}

/* Recomputes the closest wall distance and orientation flags for the target
   particle: which end of the spherocylinder faces the wall (*orientin,
   *orient) and whether the particle sits above or below it (*positive). */
void exter2_closestdist(struct interacts * interact, BOOL *positive, BOOL *orientin, double *orient, double *rcmz,double *interendz, struct vector *project)
{
    if (*rcmz < 0) {
        interact->dist = -(*rcmz);
        *positive = FALSE;
        *interendz = -1.0;
        project->z = 1.0;
    } else {
        interact->dist = (*rcmz);
        *positive = TRUE;
        *interendz = 1.0;
        project->z = -1.0;
    }
    /*psc closest is allways end closer to wall*/
    if (interact->param->geotype[0] < SP ){
        /*calculate closest point distance*/
        if (interact->part1->dir.z > 0) {
            if (*positive) {
                *orientin = FALSE;
                *orient = -1.0;
                interact->dist = *rcmz -interact->part1->dir.z * interact->param->half_len[0];
            } else {
                *orientin = TRUE;
                *orient = 1.0;
                interact->dist = -( *rcmz + interact->part1->dir.z * interact->param->half_len[0]);
            }
        } else {
            if (*positive) {
                *orientin = TRUE;
                *orient = 1.0;
                interact->dist = *rcmz + interact->part1->dir.z * interact->param->half_len[0];
            } else {
                *orientin = FALSE;
                *orient = -1.0;
                interact->dist = -( *rcmz -interact->part1->dir.z *
interact->param->half_len[0]);
            }
        }
    }
}

/*...........................................................................*/

/*Calculates interaction of target particle and external field
  calculate projection of patch of spherocylinder on wall
  evaluate intersection area and calculate interaction from that */
double exter_atre(struct interacts * interact,int *orientin, double *rcmz, double *interendz, BOOL *positive, double orient,struct vector *project, double *ndist,int numpatch,double halfl)
{
    double area,a,b,c,r2;
    double atrenergy=0.0;          /* energy*/
    BOOL countend;
    struct vector cm1,cm2;         /* centrar of interacting segments */
    struct vector pbeg,pend;       /* projected spherocylinder begining and end*/
    struct vector inters,newdir;
    struct vector pbeg1,pend1,pbeg2,pend2,pextr1,pextr2,pextr3,pextr4;  /*additinal point of projected patch for calculation of area */
    double length1, cuttoproject, f0;
    int line, line1, line2,extra;
    struct vector partbeg,partend; /*closest and furthest point of particle*/
    double erepulsive(struct interacts *);
    struct vector vec_perpproject(struct vector*, struct vector*);
    void normalise(struct vector *);
    double fanglscale(double, struct ia_param *, int which);
    struct vector vec_create(double, double, double);
    double areaeightpoints(struct vector*,struct vector*,struct vector*,struct vector*,struct vector*,struct vector*,struct vector*,struct vector*);
    int cpsc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir, struct vector* partdir, double* halfl,BOOL* orientin,BOOL* positive, double* rcmz, double * cut, struct vector* partbeg, struct vector* partend);
    int psc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir, struct vector* partdir, BOOL* positive, double * cut, struct vector* partbeg, struct vector* partend);
    int cutprojectatwall(struct vector* pextr1, struct vector* pextr2, struct vector* pextr3, struct vector* pextr4, struct vector* projectdir, struct vector* partdir, double * cutdist, struct vector *partbeg, struct vector *partend, struct vector *pend, double *cuttoproject, BOOL* orientin);
    void exter2_closestdist(struct interacts * interact, BOOL *positive, BOOL *orientin, double *orient, double *rcmz,double *interendz, struct vector *project);

    /*interaction with PATCHY SPHEROCYLINDERS*/
    if ((interact->param->geotype[0] < SP)&&(interact->param->geotype[0] > SCA)) {
        //printf("partdir: %f %f %f \n ",interact->part1->dir.x,interact->part1->dir.y,interact->part1->dir.z);
        //printf("patchdir: %f %f %f \n ",interact->part1->patchdir[numpatch].x,interact->part1->patchdir[numpatch].y,interact->part1->patchdir[numpatch].z);
        /* calculate position of closest and furthest point (begining and end of spherocylinder)*/
        a = (*orientin-0.5)*2;  /*if orientin a =1 else a=-1 */
        partbeg.x = a * interact->part1->dir.x * halfl;
        partbeg.y = a * interact->part1->dir.y * halfl;
        partbeg.z = *rcmz + a * interact->part1->dir.z * halfl;
        partend.x = - a * interact->part1->dir.x * halfl;
        partend.y = - a * interact->part1->dir.y * halfl;
        partend.z = *rcmz - a * interact->part1->dir.z * halfl;
        //printf("partbeg %f %f %f partend %f %f %f \n",partbeg.x,partbeg.y,partbeg.z,partend.x,partend.y,partend.z);
        /*calculate interacting line segment and its cm of spherocylinder*/
        /*calculate end point z*/
        if ( (interact->param->rcut - interact->dist)/fabs(interact->part1->dir.z) < halfl*2.0 ){
            /*if cutoff goes through spherocylinder the end point is at cutoff*/
            *interendz *= interact->param->rcut;
        } else {
            /*endpoint is at the end of spherocylinders*/
            *interendz = partend.z;
        }
        /*calculate CM of interacting line segment of spherocylinder*/
        if (*positive) {
            cm1.z = AVER(*interendz,interact->dist);
        } else {
            cm1.z = AVER(*interendz,-interact->dist);
        }
        if (interact->part1->dir.z != 0.0 ) {
            a = (*interendz - cm1.z ) / interact->part1->dir.z;
            length1= -orient*2.0*a;
            a = a + orient*halfl;
        } else {
            a = 0.0;
            length1 = 2.0*halfl;
        }
        //printf("len1: %f rcm %f interz %f cutoff %f \n",length1,rcmz, interendz,interact->dist);
        cm1.x = interact->part1->dir.x * a;
        cm1.y = interact->part1->dir.y * a;

        /*calculate projection on wall as infinite line and make it interacting segment*/
        if (interact->part1->patchdir[numpatch].z != 0) {
            cuttoproject = -interact->param->rcut*interact->part1->patchdir[numpatch].z;  /*z coordinate of point where projection is in cut distance*/
            if ( ((partend.z < cuttoproject)&&(*positive)) || ((cuttoproject < partend.z)&&(!(*positive))) ){
                cuttoproject = partend.z;
            }
        } else {
            cuttoproject = partbeg.z;
        }
        //printf("cutproject %f \n",cuttoproject);
        //printf("cm1 %f %f %f \n",cm1.x, cm1.y,cm1.z );

        /* we have interacting segment*/
        if ((interact->param->geotype[0] == CPSC)||(interact->param->geotype[0] == CHCPSC)) {
            /*CPSC type*/
            if ( ((cuttoproject >= interact->dist)&&(*positive)) || ((cuttoproject <= -interact->dist)&&(!(*positive))) ){
                /*test if projection is not all out of interaction*/
                line = cpsc_wall(&pbeg,&pend,&interact->part1->patchdir[numpatch],&interact->part1->dir, \
                    &interact->param->half_len[0],orientin,positive,rcmz,&interact->param->rcut,&partbeg,&partend);
                //printf("line: %d beg %f %f end %f %f \n",line,pbeg.x,pbeg.y,pend.x,pend.y);
            } else {
                line = 0;
            }
        } else {
            /*PSC and CHPSC interaction with wall */
            line = psc_wall(&pbeg,&pend,&interact->part1->patchdir[numpatch],&interact->part1->dir, \
                positive,&interact->param->rcut,&partbeg,&partend);
            //printf("line: %d beg %f %f end %f %f \n",line,pbeg.x,pbeg.y,pend.x,pend.y);
        }

        if (line > 0) {
            area = 0.0;
            /*project cutoff boudaries*/
            if (line == 2 ) {
                /*if projection end is on sphere of begining don't care about cylinder cutoff*/
                extra = 0;
            } else {
                extra = cutprojectatwall(&pextr1, &pextr2, &pextr3, &pextr4, &interact->part1->patchdir[numpatch], \
                    &interact->part1->dir, &interact->param->rcut, &partbeg, &partend,&pend,&cuttoproject,orientin);
            }
            //printf("extr1: %d %f %f extr2 %f %f extr3 %f %f extr4 %f %f \n",extra,pextr1.x,pextr1.y,pextr2.x,pextr2.y,pextr3.x,pextr3.y,pextr4.x,pextr4.y);
            /*project patch boundaries on the first side*/
            newdir=interact->part1->patchsides[0+2*numpatch];
            line1 = cpsc_wall(&pbeg1,&pend1,&newdir,&interact->part1->dir, \
                &interact->param->half_len[0],orientin,positive,rcmz,&interact->param->rcut,&partbeg,&partend);
            if ( ((interact->param->geotype[0] == PSC)||(interact->param->geotype[0] == CHPSC)) ) {
                line1 = psc_wall(&pbeg1,&pend1,&newdir,&interact->part1->dir, \
                    positive,&interact->param->rcut,&partbeg,&partend);
            }
            //printf("line1: %d beg1 %f %f end1 %f %f \n",line1,pbeg1.x,pbeg1.y,pend1.x,pend1.y);
            /*project patch boundaries on the second side*/
            newdir=interact->part1->patchsides[1+2*numpatch];
            line2 = cpsc_wall(&pbeg2,&pend2,&newdir,&interact->part1->dir, \
                &interact->param->half_len[0],orientin,positive,rcmz,&interact->param->rcut,&partbeg,&partend);
            if ( ((interact->param->geotype[0] == PSC)||(interact->param->geotype[0] == CHPSC)) ) {
                line2 = psc_wall(&pbeg2,&pend2,&newdir,&interact->part1->dir, \
                    positive,&interact->param->rcut,&partbeg,&partend);
            }
            //printf("line2: %d beg2 %f %f end2 %f %f \n",line2,pbeg2.x,pbeg2.y,pend2.x,pend2.y);

            /*calculate area*/
            if (extra == 0) {
                /*thish should only happen when there is PSC interacting only with end*/
                if (line1 == 0) {
                    if (line2==0) {
                        /*circle around middle-pbeg*/
                        area = PI*( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y));
                    } else{
                        /* circle around middle-pbeg minus circle segment*/
                        a = AVER(pbeg2.x,pend2.x);
                        b = AVER(pbeg2.y,pend2.y);
                        c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b);  /*height of triangle to segment*/
                        r2 = ( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y));  /*radius squared*/
                        area = r2*(PI-acos(sqrt(c/r2))) + sqrt(r2*c-c*c);
                    }
                } else {
                    if (line2==0) {
                        /* circle around middle-pbeg minus circle segment*/
                        a = AVER(pbeg1.x,pend1.x);
                        b = AVER(pbeg1.y,pend1.y);
                        c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b);  /*height of triangle to segment*/
                        r2 = ( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y));  /*radius squared*/
                        area = r2*(PI-acos(sqrt(c/r2))) + sqrt(r2*c-c*c);
                    } else {
                        //area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend,&pend1,&pbeg1,NULL,NULL); /* B B2 E2 E E1 B1 */
                        /*circle minus two circle segments*/
                        r2 = ( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y));  /*radius squared*/
                        area = r2*PI;
                        a = AVER(pbeg1.x,pend1.x);
                        b = AVER(pbeg1.y,pend1.y);
                        c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b);  /*height of triangle to segment*/
                        area += -r2*acos(sqrt(c/r2)) + sqrt(r2*c-c*c);
                        a = AVER(pbeg2.x,pend2.x);
                        b = AVER(pbeg2.y,pend2.y);
                        c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b);  /*height of triangle to segment*/
                        area += -r2*acos(sqrt(c/r2)) + sqrt(r2*c-c*c);
                    }
                }
            } else {
                /* extra != 0: intersection polygon built from projected patch
                   boundary points; the case ladder below selects which vertices
                   enter areaeightpoints (vertex order given in the comments) */
                b = fabs((pextr4.x-pextr2.x)*(pextr2.y-pend.y)- (pextr2.x-pend.x)*(pextr4.y-pextr2.y));/*pend on 42*/
                c = fabs((pextr1.x-pextr3.x)*(pextr3.y-pend.y)- (pextr3.x-pend.x)*(pextr1.y-pextr3.y));/*pend on 13*/
                if ( ( b< ZEROTOL) || ( c< ZEROTOL) )
                    countend = FALSE;
                else
                    countend = TRUE;
                if (line1 == 0) {
                    if (line2 == 0) {
                        if ( countend ) {
                            area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pend,&pextr3,&pextr1,NULL,NULL);/* B 2 4 E 3 1 */
                        } else
                            area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pextr3,&pextr1,NULL,NULL,NULL);/* B 2 4 3 1 */
                    } else {
                        a = fabs((pextr4.x-pextr2.x)*(pextr2.y-pend2.y)- (pextr2.x-pend2.x)*(pextr4.y-pextr2.y));
                        if ( a< ZEROTOL) /*pend2 on 42*/ {
                            if ( countend ) {
                                area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pextr3,&pextr1,NULL);  /* B B2 E2 4 E 3 1 */
                            } else {
                                area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pextr3,&pextr1,NULL,NULL);  /* B B2 E2 4 3 1 */
                            }
                        } else {
                            a = fabs((pextr1.x-pextr3.x)*(pextr3.y-pend2.y)- (pextr3.x-pend2.x)*(pextr1.y-pextr3.y));
                            if ( a< ZEROTOL) /*pend2 on 13*/ {
                                area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr1,NULL,NULL,NULL,NULL); /*
B B2 E2 1 */
                            } else {
                                /*pend2 on 34 or on begining sphere of psc*/
                                if (line2 == 2) {
                                    if ( countend ) {
                                        area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pextr3,&pextr1,NULL);  /* B B2 E2 4 E 3 1 */
                                    } else {
                                        area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pextr3,&pextr1,NULL,NULL);  /* B B2 E2 4 3 1 */
                                    }
                                } else {
                                    if ( countend ) {
                                        area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend,&pextr3,&pextr1,NULL,NULL);  /* B B2 E2 E 3 1 */
                                    } else {
                                        area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr3,&pextr1,NULL,NULL,NULL);  /* B B2 E2 3 1 */
                                    }
                                }
                            }
                        }
                    }
                } else {
                    a = fabs((pextr4.x-pextr2.x)*(pextr2.y-pend1.y)- (pextr2.x-pend1.x)*(pextr4.y-pextr2.y));
                    if ( a< ZEROTOL) /*pend1 on 42*/ {
                        if (line2 == 0) {
                            area = areaeightpoints(&pbeg,&pextr2,&pend1,&pbeg1,NULL,NULL,NULL,NULL);  /* B 2 E1 B1 */
                        } else {
                            area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend1,&pbeg1,NULL,NULL,NULL);  /* B B2 E2 E1 B1 */
                        }
                    } else {
                        a = fabs((pextr1.x-pextr3.x)*(pextr3.y-pend1.y)- (pextr3.x-pend1.x)*(pextr1.y-pextr3.y));
                        if ( a< ZEROTOL) /*pend1 on 13*/ {
                            if (line2 == 0) {
                                if (countend) {
                                    area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pend,&pextr3,&pend1,&pbeg1,NULL);  /* B 2 4 E 3 E1 B1 */
                                } else {
                                    area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pextr3,&pend1,&pbeg1,NULL,NULL);  /* B 2 4 3 E1 B1 */
                                }
                            } else {
                                a = fabs((pextr4.x-pextr2.x)*(pextr2.y-pend2.y)- (pextr2.x-pend2.x)*(pextr4.y-pextr2.y));
                                if ( a< ZEROTOL) /*pend2 on 42*/ {
                                    if (countend)
                                        area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pextr3,&pend1,&pbeg1);  /* B B2 E2 4 E 3 E1 B1 */
                                    else
                                        area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pextr3,&pend1,&pbeg1,NULL);  /* B B2 E2 4 3 E1 B1 */
                                } else {
                                    a = fabs((pextr3.x-pextr1.x)*(pextr1.y-pend2.y)- (pextr1.x-pend2.x)*(pextr3.y-pextr1.y));
                                    if ( a< ZEROTOL) /*pend2 on 31*/ {
                                        area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend1,&pbeg1,NULL,NULL,NULL);  /* B B2 E2 E1 B1 */
                                    } else {
                                        /*pend2 close to 34 or on begining sphere of psc*/
                                        if (line2 == 2) {
                                            if (countend)
                                                area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pextr3,&pend1,&pbeg1);  /* B B2 E2 4 E 3 E1 B1 */
                                            else
                                                area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pextr3,&pend1,&pbeg1,NULL);  /* B B2 E2 4 3 E1 B1 */
                                        } else {
                                            if (countend)
                                                area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend,&pextr3,&pend1,&pbeg1,NULL);  /* B B2 E2 E 3 E1 B1 */
                                            else
                                                area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr3,&pend1,&pbeg1,NULL,NULL);  /* B B2 E2 3 E1 B1 */
                                        }
                                    }
                                }
                            }
                        } else {
                            /*pend1 close to 34 or on beging sphere for psc*/
                            if (line2 == 0) {
                                if (line1 ==2) {
                                    if (countend)
                                        area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pend,&pextr3,&pend1,&pbeg1,NULL);  /* B 2 4 E 3 E1 B1*/
                                    else {
                                        area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pextr3,&pend1,&pbeg1,NULL,NULL);  /* B 2 4 3 E1 B1*/
                                    }
                                } else {
                                    if (countend)
                                        area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pend,&pend1,&pbeg1,NULL,NULL);  /* B 2 4 E E1 B1*/
                                    else {
                                        area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pend1,&pbeg1,NULL,NULL,NULL);  /* B 2 4 E1 B1*/
                                    }
                                }
                            } else {
                                a = fabs((pextr4.x-pextr2.x)*(pextr2.y-pend2.y)- (pextr2.x-pend2.x)*(pextr4.y-pextr2.y));
                                if ( a< ZEROTOL) /* pend2 on 42 */ {
                                    if (countend)
                                        area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pend1,&pbeg1,NULL);  /* B B2 E2 4 E E1 B1 */
                                    else
                                        area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend1,&pbeg1,NULL,NULL);  /* B B2 E2 4 E1 B1 */
                                } else {
                                    /*pend1 and pend2 close to 34 or on beging sphere for psc*/
                                    if (line2 == 2) {
                                        if (line1 == 2) {
                                            if (countend)
                                                area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pextr3,&pend1,&pbeg1);  /* B B2 E2 4 E 3 E1 B1 */
                                            else
                                                area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pextr3,&pend1,&pbeg1,NULL);  /* B B2 E2 4 3 E1 B1 */
                                        } else {
                                            if (countend)
                                                area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pend1,&pbeg1,NULL);  /* B B2 E2 4 E E1 B1 */
                                            else
                                                area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend1,&pbeg1,NULL,NULL);  /* B B2 E2 4 E1 B1 */
                                        }
                                    } else {
                                        if (line1 == 2) {
                                            if (countend)
                                                area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend,&pextr3,&pend1,&pbeg1,NULL);  /* B B2 E2 E 3 E1 B1 */
                                            else
                                                area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr3,&pend1,&pbeg1,NULL,NULL);  /* B B2 E2 3 E1 B1 */
                                        } else {
                                            if (countend)
                                                area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend,&pend1,&pbeg1,NULL,NULL);  /* B B2 E2 E E1 B1 */
                                            else
                                                area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend1,&pbeg1,NULL,NULL,NULL);  /* B B2 E2 E1 B1 */
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            } /*extra != 0*/

            /* PSC types: the projected ends are rounded -- add the missing
               circle-segment areas at the sphere caps */
            if ((interact->param->geotype[0] == PSC)||(interact->param->geotype[0] == CHPSC)) {
                if (line1==2) {
                    /* add circle segment*/
                    a = AVER(pextr1.x,pend1.x);  /*end to cutoff - pextr1 ,pend1 */
                    b = AVER(pextr1.y,pend1.y);
                    c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b);  /*height of triangle to segment*/
                    r2 = ( (partbeg.x-pend1.x)*(partbeg.x-pend1.x) + (partbeg.y-pend1.y)*(partbeg.y-pend1.y));  /*radius squared*/
                    area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c);
                    a = AVER(pbeg.x,pbeg1.x);  /* between beginings - pbeg ,pbeg1 */
                    b = AVER(pbeg.y,pbeg1.y);
                    c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b);  /*height of triangle to segment*/
                    area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c);
                } else {
                    if (line1==0) {
                        /* add circle segment*/
                        a = AVER(pextr1.x,pbeg.x);  /* begining to cutoff*/
                        b = AVER(pextr1.y,pbeg.y);
                        c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b);  /*height of triangle to segment*/
                        r2 = ( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y));  /*radius squared*/
                        area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c);
                    }
                }
                if (line2==2) {
                    /* add circle segment*/
                    a = AVER(pextr3.x,pend2.x);  /*end to cutoff - pextr3 ,pend2 */
                    b = AVER(pextr3.y,pend2.y);
                    c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b);  /*height of triangle to segment*/
                    r2 = ( (partbeg.x-pend2.x)*(partbeg.x-pend2.x) + (partbeg.y-pend2.y)*(partbeg.y-pend2.y));  /*radius squared*/
                    area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c);
                    a = AVER(pbeg.x,pbeg2.x);  /* between beginings - pbeg ,pbeg2 */
                    b =
AVER(pbeg.y,pbeg2.y);
                    c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b);  /*height of triangle to segment*/
                    area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c);
                } else {
                    if (line2==0) {
                        /* add circle segment*/
                        a = AVER(pextr3.x,pbeg.x);  /* begining to cutoff*/
                        b = AVER(pextr3.y,pbeg.y);
                        c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b);  /*height of triangle to segment*/
                        r2 = ( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y));  /*radius squared*/
                        area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c);
                    }
                }
            }
            /*area finished*/

            /*cm2 by average begining and end*/
            cm2.x = AVER(pbeg.x,pend.x);
            cm2.y = AVER(pbeg.y,pend.y);
            cm2.z = 0.0;
            /*length by size of end-benining*/
            //length2 = sqrt( (pend.x-pbeg.x)*(pend.x-pbeg.x)+(pend.y-pbeg.y)*(pend.y-pbeg.y) );
            inters.x = cm2.x - cm1.x;
            inters.y = cm2.y - cm1.y;
            inters.z = cm2.z - cm1.z;
            //printf("cm2 %f %f %f inters %f %f %f \n",cm2.x,cm2.y,cm2.z,inters.x,inters.y,inters.z);
            *ndist = sqrt(DOT(inters,inters));
            /* square-well with cosine-squared switch between pdis and pdis+pswitch */
            if (*ndist < interact->param->pdis) {
                atrenergy = -interact->param->epsilon;
            } else {
                atrenergy= cos(PIH*(*ndist-interact->param->pdis)/interact->param->pswitch);
                atrenergy *= -atrenergy*interact->param->epsilon;
            }
            /* scaling function1: dependence on the length of intersetions plus SCALING WITH AREA*/
            f0=(length1 + area / interact->param->sigma)*0.5;
            atrenergy *= f0;
            //printf(" %f %f %f %f %f %f %f %d %d %d \n",conf->particle[target].pos.z*conf->box.z,atrenergy, area, length1, length2,f0,ndist,extra,line1,line2);
            //printf("%f %f %f %f\n",pbeg.x,pbeg.y,pend.x,pend.y);
            //printf("%f %f %f %f %f %f\n",pbeg2.x,pend2.y,pextr2.x,pextr2.y,pextr1.x,pextr1.y);
        } else {
            atrenergy = 0.0;
        }
    } else {
        /* non-patchy case: plain distance-based attraction */
        if (*ndist < interact->param->pdis)
            atrenergy = -interact->param->epsilon;
        else {
            atrenergy= cos(PIH*(*ndist-interact->param->pdis)/interact->param->pswitch);
            atrenergy *= -atrenergy*interact->param->epsilon;
        }
        /*add wall scaling wall area/ particle arear.. to reflect that we have a wall not sphere */
        atrenergy *= (interact->param->rcut*interact->param->rcut - (*ndist)*(*ndist))/(interact->param->sigma*interact->param->sigma) ;
    }
    //printf("%f %f \n",conf->particle[target].pos.z*conf->box.z,atrenergy);
    return atrenergy;
}

/*..............................................................................*/

/* Initializes the array with the pointers to the energy function */
/* For every pair of particle types, selects the pair-energy routine that
   matches the two geometry types (spheres, spherocylinders, patchy variants).
   Unhandled combinations keep the &enoexist sentinel. */
void init_intfce(double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo){
    // NB
    // Fill in the names of the functions for calculating the
    // interaction energy
    long geotype, other_geotype;
    int i, j;
    for(i = 0; i < MAXT; i++){
        for(j = 0; j < MAXT; j++){
            /* Initialize them as not existing */
            intfce[i][j] = &enoexist;
            geotype = topo->ia_params[i][j].geotype[0];
            other_geotype = topo->ia_params[i][j].geotype[1];
            /* cylindrical-patch SC with spherical-patch SC (either order) */
            if ( ( (geotype == CHCPSC || geotype == CPSC || geotype == TCHCPSC || geotype == TCPSC) && (other_geotype == CHPSC || other_geotype == PSC || other_geotype == TCHPSC || other_geotype == TPSC) ) || ( (geotype == CHPSC || geotype == PSC || geotype == TCHPSC || geotype == TPSC) && (other_geotype == CHCPSC || other_geotype == CPSC || other_geotype == TCHCPSC || other_geotype == TCPSC) ) ) {
                intfce[i][j] = &e_psc_cpsc;
            }
            if ( (geotype == CHCPSC || geotype == CPSC || geotype == TCHCPSC || geotype == TCPSC) && (other_geotype == CHCPSC || other_geotype == CPSC || other_geotype == TCHCPSC || other_geotype == TCPSC) ){
                intfce[i][j] = &e_cpsc_cpsc;
            }
            if ( (geotype == CHPSC || geotype == PSC || geotype == TCHPSC || geotype == TPSC) && (other_geotype == CHPSC || other_geotype == PSC || other_geotype == TCHPSC || other_geotype == TPSC) ){
                intfce[i][j] = &e_psc_psc;
            }
            if(geotype == SCN || geotype == SPN || other_geotype == SCN || other_geotype == SPN){
                intfce[i][j] = &e_spn_or_scn;
            }
            if((geotype == SCA && other_geotype == SCA) || (geotype == SPA && other_geotype == SPA)){
                intfce[i][j] = &e_2sca_or_2spa;
            }
            if((geotype == SCA && other_geotype == SPA) || (geotype == SPA && other_geotype == SCA)){
                intfce[i][j] = &e_spa_sca;
            }
            if(( (geotype == PSC || geotype == CHPSC || geotype == TCHPSC || geotype == TPSC) && other_geotype == SPA) || (geotype == SPA && (other_geotype == PSC||other_geotype == CHPSC || other_geotype == TCHPSC || other_geotype == TPSC) )){
                intfce[i][j] = &e_psc_spa;
            }
            if(( (geotype == CPSC ||geotype == CHCPSC || geotype == TCHCPSC || geotype == TCPSC) && other_geotype == SPA) || (geotype == SPA && (other_geotype == CPSC||other_geotype == CHCPSC || other_geotype == TCHCPSC || other_geotype == TCPSC) )){
                intfce[i][j] = &e_cpsc_spa;
            }
        }
    }
}

/*..............................................................................*/

/* Compare energy change to temperature and based on Boltzmann probability
   return either 0 to accept or 1 to reject the move */
int movetry(double energyold, double energynew, double temperature)
{
    double ran2(long *);

    /*DEBUG printf (" Move trial: %13.8lf %13.8lf %13.8lf %13.8lf\n", energynew, energyold, temperature, ran2(&seed));*/
    /* standard Metropolis criterion; `seed` is the module-level RNG state */
    if (energynew <= energyold ) {
        return 0;
    } else {
        if (exp(-1.0*(energynew-energyold)/temperature) > ran2(&seed)) {
            return 0;
        } else {
            return 1;
        }
    }
}

/*..............................................................................*/

/*
 * Calculate the different energy contributions. This is a merge of the different
 * energy calculation functions (energyone, -chain, -all)
 * 0: all
 * 1: one
 * 2: chain
 */
double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *), int mode, struct topo * topo, struct conf * conf, struct sim * sim, int chainnum)
{
    long i=0,j=0;
    double paire(long, long, double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo, struct conf * conf);
    // double extere(long, struct topo * topo, struct conf * conf);
    double extere2(long, struct topo * topo, struct conf * conf);
    //DEBUG_SIM("Calculate the energy with mode %d", mode)
    double energy = 0;
    /* Calculates energy between particle "target" and the rest.
 Returns energy */
    if(mode == 1){
        if (sim->pairlist_update) {
#ifdef OMP
#pragma omp parallel for private(i) reduction (+:energy) schedule (dynamic)
#endif
            /* pair list available: visit only the stored neighbours of target */
            for (i = 0; i < sim->pairlist[target].num_pairs; i++){
                energy+= paire(target, sim->pairlist[target].pairs[i], intfce, topo, conf);
            }
        }
        else{
            /* no pair list: loop over every other particle, split in two
               ranges so that target itself is skipped */
#ifdef OMP
#pragma omp parallel for private(i) reduction (+:energy) schedule (dynamic)
#endif
            for (i = 0; i < target; i++) {
                energy+= paire(target, i, intfce, topo, conf);
            }
#ifdef OMP
#pragma omp parallel for private(i) reduction (+:energy) schedule (dynamic)
#endif
            for (i = target + 1; i < topo->npart; i++) {
                energy+= paire(target, i, intfce, topo, conf);
            }
        }
        /*add interaction with external potential*/
        if (topo->exter.exist)
            energy+= extere2(target,topo,conf);
    }
    /*
     * Calculates energy between particle "target" and the rest. skipping
     * particles from the given chain -particles has to be sorted in chain!!
     * so similar to energy one but with chain exception
     */
    else if(mode == 2){
        /* j walks through topo->chainlist[chainnum] in step with i.
           NOTE(review): relies on the chain list being sorted in ascending
           particle index — confirm at the call sites. */
        //#ifdef OMP
        //#pragma omp parallel for private(i) reduction (+:energy) schedule (dynamic)
        //#endif
        for (i = 0; i < target; i++) {
            if (i != topo->chainlist[chainnum][j]) {
                energy+= paire(target, i, intfce, topo, conf);
            } else {
                j++;
            }
        }
        j++; /* skip over target's own entry in the chain list */
        //#ifdef OMP
        //#pragma omp parallel for private(i) reduction (+:energy) schedule (dynamic)
        //#endif
        for (i = target + 1; i < topo->npart; i++) {
            if (i != topo->chainlist[chainnum][j]) {
                energy+= paire(target, i, intfce, topo, conf);
            } else {
                j++;
            }
        }
        /*add interaction with external potential*/
        if (topo->exter.exist)
            energy+= extere2(target,topo,conf);
    }
    /* Calculates energy between all pairs.
       Returns energy */
    else if(mode == 0){
#ifdef OMP
#pragma omp parallel for private(i,j) reduction (+:energy) schedule (dynamic)
#endif
        for (i = 0; i < topo->npart - 1; i++) {
            for (j = i + 1; j < topo->npart; j++) {
                energy+= paire(i, j, intfce, topo, conf);
            }
            /*for every particle add interaction with external potential*/
            if (topo->exter.exist)
                energy+= extere2(i,topo,conf);
        }
        /*add interaction of last particle with external potential*/
        if (topo->exter.exist)
            energy+= extere2(topo->npart-1,topo,conf);
    } else {
        fprintf(stderr, "ERROR: Wrong mode (%d) was given to calc_energy!", mode);
        return 0.0;
    }

    // DEBUG_SIM("Will return energy from calc_energy")
    //printf("energymove %f\n",energy);
    return energy;
}

/*..............................................................................*/

/* Checks for overlaps between particle "target" and the rest.
Returns 1 if overlap detected, 0 otherwise. */
int forbidden(long npart, struct particles *particle, long target,
        struct vector box, struct ia_param ia_params[MAXT][MAXT])
{
    long test;
    int overlap(struct particles, struct particles, struct vector,struct ia_param [MAXT][MAXT]);

    for (test=0; test<npart; test++) {
        if (test != target) {
            if ( overlap(particle[target], particle[test], box, ia_params) ) {
                return 1; /* first overlap found — early exit */
            }
        }
    }

    return 0;
}

/*..............................................................................*/

/* Checks for overlaps between all pairs of particles.
Returns 1 if overlap detected, 0 otherwise.
*/
int checkall(long npart, struct particles *particle,
        struct vector box, struct ia_param ia_params[MAXT][MAXT])
{
    long i, j;
    int overlap(struct particles, struct particles, struct vector,
            struct ia_param [MAXT][MAXT]);

    /* all unordered pairs; stop at the first overlap */
    for (i=0; i<npart-1; i++) {
        for (j=i+1; j<npart; j++) {
            if ( overlap(particle[i], particle[j], box, ia_params) ) {
                return 1;
            }
        }
    }

    return 0;
}

/*..............................................................................*/

/* Optimize the maximum displacement within the specified limits and resets
the acceptance counters to zero. */
/* Adaptive step-size control: mx is nudged up/down by 5% depending on whether
   the acceptance-weighted displacement (mx * RATIO) improved since the last
   call, then clamped to [lo, hi]. oldmx records the direction of the last
   adjustment; acc/rej counters are reset for the next sampling window. */
void optimizestep(struct disp *x, double hi, double lo) {
    double newrmsd;

    newrmsd = (*x).mx * RATIO(*x);
    if ((*x).oldrmsd > 0) {
        if ( newrmsd < (*x).oldrmsd ) {
            /* got worse: reverse the direction of the previous adjustment */
            if ( (*x).oldmx > 1 ) {
                (*x).mx /= 1.05;
                (*x).oldmx = 0.95;
            } else {
                (*x).mx *= 1.05;
                (*x).oldmx = 1.05;
            }
        } else {
            /* got better (or equal): keep adjusting in the same direction */
            if ( (*x).oldmx > 1 ) {
                (*x).mx *= 1.05;
                (*x).oldmx = 1.05;
            } else {
                (*x).mx /= 1.05;
                (*x).oldmx = 0.95;
            }
        }
    }
    if (newrmsd > 0 ) (*x).oldrmsd = newrmsd;
    else {
        /* nothing accepted in this window: shrink the step */
        (*x).oldrmsd = 0.0;
        (*x).mx /= 1.05;
        (*x).oldmx = 0.95;
    }
    if ( (*x).mx > hi ) (*x).mx = hi;
    if ( (*x).mx < lo ) (*x).mx = lo;

    (*x).acc = (*x).rej = 0;
}

/*..............................................................................*/

/* Optimize the maximum rotation within the specified limits and resets the
acceptance counters to zero.
 Rotation is given by cos of angle
larger rotation = smaller cos */
/* Same adaptive scheme as optimizestep, but because mx stores a cosine the
   comparison direction and 0.99/1.01 multipliers are inverted: a LARGER
   rotation corresponds to a SMALLER mx. */
void optimizerot(struct disp *x, double hi, double lo) {
    double newrmsd;

    newrmsd = (*x).mx * RATIO((*x)) ;
    if ((*x).oldrmsd > 0) {
        if ( newrmsd > (*x).oldrmsd ) {
            if ( (*x).oldmx > 1) {
                (*x).mx *= 0.99;
                (*x).oldmx *= 0.99;
            } else {
                (*x).mx *= 1.01;
                (*x).oldmx *= 1.01;
            }
        } else {
            if ( (*x).oldmx > 1) {
                (*x).mx *= 1.01;
                (*x).oldmx *= 1.01;
            } else {
                (*x).mx *= 0.99;
                (*x).oldmx *= 0.99;
            }
        }
    }
    if (newrmsd > 0 ) (*x).oldrmsd = newrmsd;
    else {
        (*x).oldrmsd = 0.0;
        (*x).mx *= 1.01;
        (*x).oldmx = 1.01;
    }
    if ( (*x).mx > hi ) (*x).mx = hi;
    if ( (*x).mx < lo ) (*x).mx = lo;

    (*x).acc = (*x).rej = 0;
}

/*................................................................................*/

/* Accumulate a value into the statistics and update the mean and rms values. */
/* Running first and second moments; mean and rms are recomputed on every
   sample so they are always current. */
void accumulate(struct stat *q, double x) {
    (*q).sum += x;
    (*q).sum2 += x*x;
    (*q).samples++;
    (*q).mean = (*q).sum / (*q).samples;
    (*q).rms = sqrt(fabs((*q).sum2 / (*q).samples - (*q).sum * (*q).sum / (*q).samples / (*q).samples));
}

/* Print, per particle type, the equilibrated step size (scaled) and its
   acceptance ratio; types with zero acceptance ratio are omitted. */
void printeqstat(struct disp *dat, double scale, int length) {
    int i;

    for (i=0;i<length;i++) {
        if (RATIO(dat[i]) > 0)
            printf (" TYPE %d %.6lf / %.6lf\n", i, dat[i].mx/scale,RATIO(dat[i]));
    }
}

/* Allocate the particle array (fixed capacity MAXN).
   Returns 0 on success, 1 on allocation failure. */
int memoryalloc(struct conf * conf) {
    printf ("Allocating memory...\n");

    conf->particle = malloc( sizeof(struct particles)*MAXN);
    if(conf->particle == NULL){
        return 1;
    }

    return 0;
}

/* Release the particle array, cluster bookkeeping, switch list and (when
   pair lists are in use) the pair-list storage.
   Returns 0 on success, 1 if the pair-list deallocation failed. */
int memorydealloc(struct conf * conf, struct topo * topo, struct sim * sim) {
    int dealloc_pairlist(struct topo * topo, struct sim * sim);

    printf ("Deallocating memory...\n");

    if (conf->particle != NULL)
        free(conf->particle);
    conf->particle = NULL;
    if (sim->clusterlist != NULL)
        free(sim->clusterlist);
    if (sim->clustersenergy != NULL)
        free(sim->clustersenergy);
    if(topo->switchlist){
        free(topo->switchlist);
    }
    if (sim->pairlist_update) {
        if(dealloc_pairlist(topo, sim)){
            return 1;
        }
    }

    return 0;
}
/*............................................................................*/

/**
 * nice malloc, which does the error checking for us
 */
/* Aborts the whole program (exit(1)) on allocation failure, so the returned
   pointer is never NULL. */
void * xmalloc (size_t num){
    void *new = malloc (num);
    if (!new){
        fprintf(stderr, "Couldn't allocate any memory!\n");
        exit(1);
    }
    return new;
}

/*............................................................................*/

/* *********************** GEOMETRICAL FUNCTIONS **************************** */

/*.........................PATCHY SPOHEROCYLINDERS INTERACTION....................*/

/*................................................................................*/

/* Calculate intersections of sc2 with a patch of sc1 and return them in */
/* Intersections (parameters along sc2's axis) are appended to
   intersections[]; the return value is the number of NEW intersections
   found. r_cm is the vector between the two centres of mass; `which` and
   `patchnum` select which patch of sc1 is tested. */
int psc_intersect(struct particles * part1, struct particles * part2,
        double halfl1, double halfl2, struct vector r_cm,
        double intersections[5], double rcut, struct ia_param * param,
        int which, int patchnum)
{
    int intrs;
    double a, b, c, d, e, x1, x2, rcut2;
    struct vector cm21, vec1, vec2, vec3, vec4;
    struct vector vec_crossproduct(struct vector, struct vector);
    struct vector vec_sub(struct vector, struct vector);
    struct vector vec_create(double, double, double);
    struct vector vec_scale(struct vector, double);
    struct vector vec_perpproject(struct vector*, struct vector*);
    struct quat quat_create(struct vector, double, double);
    void vec_rotate(struct vector *, struct quat);
    void normalise(struct vector *);
    int find_intersect_plane(struct particles *, struct particles *, double,
            struct vector, struct vector, double, double, double *);
    int test_intrpatch(struct particles *, struct vector, double, double, double *,int);

    intrs=0;
    rcut2=rcut*rcut;
    /*1- do intersections of spherocylinder2 with patch of spherocylinder1 at cut distance C*/
    /*1a- test intersection with half planes of patch and look how far they are from spherocylinder. If closer then C we got itersection*/

    /* plane1 */
    /* find intersections of part2 with plane by par1 and patchsides[0] */
    intrs+=find_intersect_plane(part1,part2,halfl2,r_cm,part1->patchsides[0+2*patchnum],rcut,param->pcanglsw[which+2*patchnum],intersections);
    // printf("plane1 %d\n", intrs);

    /* plane2 */
    /* find intersections of part2 with plane by par1 and patchsides[1] */
    intrs+=find_intersect_plane(part1,part2,halfl2,r_cm,part1->patchsides[1+2*patchnum],rcut,param->pcanglsw[which+2*patchnum],intersections);

    if ( (intrs == 2 ) && (param->pcanglsw[which+2*patchnum] <0) ) {
        fprintf (stderr, "ERROR: Patch is larger than 180 degrees and we are getting two segments - this hasnot been programed yet.\n\n");
        exit (1);
    }
    // printf("plane2 %d\n", intrs);

    /*1b- test intersection with cylinder - it is at distance C*/
    /* solve |(x*dir2 - r_cm) x dir1|^2 = rcut^2 — a quadratic in x */
    if (intrs < 2 ) {
        cm21=vec_scale(r_cm,-1.0);
        vec1=vec_crossproduct(cm21,part1->dir);
        vec2=vec_crossproduct(part2->dir,part1->dir);
        a = DOT(vec2,vec2);
        b = 2*DOT(vec1,vec2);
        c = -rcut*rcut + DOT(vec1,vec1);
        d = b*b - 4*a*c;
        if ( d >= 0) { /*there is intersection with infinite cylinder */
            x1 = (-b+sqrt(d))*0.5/a;/*parameter on line of SC2 determining intersection*/
            if ((x1 >=halfl2) || (x1 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/
            else {
                /* vectors from center os sc1 to intersection with infinite cylinder*/
                vec1.x=part2->dir.x*x1-r_cm.x;
                vec1.y=part2->dir.y*x1-r_cm.y;
                vec1.z=part2->dir.z*x1-r_cm.z;
                e = DOT(part1->dir,vec1);
                if ((e >=halfl1) || (e <= -halfl1)) intrs+=0; /*intersection is outside sc1*/
                else {
                    intrs+=test_intrpatch(part1,vec1,param->pcanglsw[which+2*patchnum],x1,intersections,patchnum);
                }
            }
            if ( d > 0 ){
                x2 = (-b-sqrt(d))*0.5/a;/*parameter on line of SC2 determining intersection*/
                if ((x2 >=halfl2) || (x2 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/
                else {
                    vec2.x = part2->dir.x*x2-r_cm.x;
                    vec2.y = part2->dir.y*x2-r_cm.y;
                    vec2.z = part2->dir.z*x2-r_cm.z;
                    e = DOT(part1->dir,vec2);
                    if ((e >=halfl1) || (e <= -halfl1)) intrs+=0; /*intersection is outside sc1*/
                    else {
                        intrs+=test_intrpatch(part1,vec2,param->pcanglsw[which+2*patchnum],x2,intersections,patchnum);
                    }
                }
            }
        }
    }
    // printf ("cylinder %d x1 %f x2 %f e %f\n", intrs, x1, x2, e);

    /*1c- test intersection with spheres at the end - it is at distace C*/
    if (intrs < 2 ) {
        /*centers of spheres*/
        /*relative to the CM of sc2*/
        vec1.x = part1->dir.x*halfl1 - r_cm.x;
        vec1.y = part1->dir.y*halfl1 - r_cm.y;
        vec1.z = part1->dir.z*halfl1 - r_cm.z;
        vec2.x = -part1->dir.x*halfl1 - r_cm.x;
        vec2.y = -part1->dir.y*halfl1 - r_cm.y;
        vec2.z = -part1->dir.z*halfl1 - r_cm.z;

        /*sphere1*/
        a = DOT(part2->dir,part2->dir);
        b = 2.0*DOT(vec1,part2->dir);
        c = DOT(vec1,vec1)-rcut*rcut;
        d = b*b-4*a*c;
        if (d >= 0) { /*if d<0 there are no intersections*/
            x1= (-b + sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/
            if ((x1 >=halfl2) || (x1 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/
            else {
                vec3.x = part2->dir.x*x1-r_cm.x;
                vec3.y = part2->dir.y*x1-r_cm.y;
                vec3.z = part2->dir.z*x1-r_cm.z;
                e = DOT(part1->dir,vec3);
                if ((e >= halfl1) || (e <= -halfl1)) { /*if not intersection is inside sc1*/
                    intrs+=test_intrpatch(part1,vec3,param->pcanglsw[which+2*patchnum],x1,intersections,patchnum);
                }
            }
            if ( d > 0) {
                x2= (-b - sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/
                if ((x2 >=halfl2) || (x2 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/
                else {
                    vec4.x = part2->dir.x*x2 - r_cm.x;
                    vec4.y = part2->dir.y*x2 - r_cm.y;
                    vec4.z = part2->dir.z*x2 - r_cm.z;
                    e = DOT(part1->dir,vec4);
                    if ((e >=halfl1) || (e <= -halfl1)) { /*if not intersection is inside sc1*/
                        intrs+=test_intrpatch(part1,vec4,param->pcanglsw[which+2*patchnum],x2,intersections,patchnum);
                    }
                }
            }
        }
        // printf ("sphere1 %d x1 %f x2 %f e %f\n", intrs, x1, x2, e);

        /*sphere2*/
        a = DOT(part2->dir,part2->dir);
        b = 2.0*DOT(vec2,part2->dir);
        c = DOT(vec2,vec2)-rcut*rcut;
        d = b*b-4*a*c;
        if (d >= 0) { /*if d<0 there are no intersections*/
            x1= (-b + sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/
            if ((x1 >=halfl2) || (x1 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/
            else {
                vec3.x = part2->dir.x*x1 - r_cm.x;
                vec3.y = part2->dir.y*x1 - r_cm.y;
                vec3.z = part2->dir.z*x1 - r_cm.z;
                e = DOT(part1->dir,vec3);
                if ((e >=halfl1) || (e <= -halfl1)) { /*if not intersection is inside sc1*/
                    intrs+=test_intrpatch(part1,vec3,param->pcanglsw[which+2*patchnum],x1,intersections,patchnum);
                }
            }
            if ( d > 0 ) {
                x2= (-b - sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/
                if ((x2 >=halfl2) || (x2 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/
                else {
                    vec4.x = part2->dir.x*x2 - r_cm.x;
                    vec4.y = part2->dir.y*x2 - r_cm.y;
                    vec4.z = part2->dir.z*x2 - r_cm.z;
                    e = DOT(part1->dir,vec4);
                    if ((e >=halfl1) || (e <= -halfl1)) { /*if not intersection is inside sc1*/
                        intrs+=test_intrpatch(part1,vec4,param->pcanglsw[which+2*patchnum],x2,intersections,patchnum);
                    }
                }
            }
        }
        // printf ("sphere2 %d\n", intrs);
    }

    /*1d- if there is only one itersection shperocylinder ends within patch wedge set as second intersection end inside patch*/
    if (intrs < 2 ) {
        /*whole spherocylinder is in or all out if intrs ==0*/
        vec1.x = part2->dir.x*halfl2 - r_cm.x;
        vec1.y = part2->dir.y*halfl2 - r_cm.y;
        vec1.z = part2->dir.z*halfl2 - r_cm.z;
        /*vector from CM of sc1 to end of sc2*/
        /*check is is inside sc1*/
        a=DOT(vec1,part1->dir);
        vec3.x = vec1.x - part1->dir.x*a;
        vec3.y = vec1.y - part1->dir.y*a;
        vec3.z = vec1.z - part1->dir.z*a;
        b=DOT(vec3,vec3);
        d = fabs(a)-halfl1;
        if ( d <= 0) c = b; /*is inside cylindrical part*/
        else c = d*d + b; /*is inside caps*/
        /*c is distance squared from line or end to test if is inside sc*/
        if (c < rcut2)
            intrs+=test_intrpatch(part1,vec1,param->pcanglsw[which+2*patchnum],halfl2,intersections,patchnum);
        if (intrs < 2 ) {
            /* repeat the same test for the opposite end of sc2 */
            vec2.x = -part2->dir.x*halfl2 - r_cm.x;
            vec2.y = -part2->dir.y*halfl2 - r_cm.y;
            vec2.z = -part2->dir.z*halfl2 - r_cm.z;
            /*check is is inside sc1*/
            a=DOT(vec2,part1->dir);
            vec4.x = vec2.x - part1->dir.x*a;
            vec4.y = vec2.y - part1->dir.y*a;
            vec4.z = vec2.z - part1->dir.z*a;
            b=DOT(vec4,vec4);
            d = fabs(a) -halfl1;
            if (d <= 0) c = b; /*is inside cylindrical part*/
            else c = d*d + b; /*is inside caps*/
            /*c is distance squared from line or end to test if is inside sc*/
            if (c < rcut2)
                intrs+=test_intrpatch(part1,vec2,param->pcanglsw[which+2*patchnum],-1.0*halfl2,intersections,patchnum);
        }
        // printf ("ends %d\n", intrs);
    }

    return intrs;
}

/*................................................................................*/

/* Find if vector vec has angular intersection with patch of sc1 */
/* vec is projected into the plane perpendicular to sc1's axis and compared
   against the patch direction; on success the parameter ti is stored in
   intersections[] (duplicates at patch boundaries are rejected).
   Returns 1 if a new intersection was recorded, 0 otherwise. */
int test_intrpatch(struct particles * part1, struct vector vec, double cospatch,
        double ti, double intersections[5],int patchnum)
{
    double a;
    int i, intrs;
    struct vector vec_perpproject(struct vector*, struct vector*);
    void normalise(struct vector *);

    intrs=0;
    /*test if we have intersection*/
    /* do projection to patch plane*/
    vec=vec_perpproject(&vec,&part1->dir);
    normalise(&vec);
    /* test angle distance from patch*/
    a = DOT(part1->patchdir[patchnum],vec);
    if (a >= cospatch) {
        intrs=1;
        i=0;
        while (intersections[i] !=0) {
            if (ti == intersections[i]) intrs=0; /* found intersection we already have -it is at boundary*/
            i++;
        }
        if (intrs > 0) intersections[i]=ti;
    }

    return intrs;
}

/*................................................................................*/

/* Find intersections of SC and plane defined by vector w_vec.and returns number of them */
int find_intersect_plane(struct particles * part1, struct particles * part2, double halfl2,
        struct vector r_cm, struct vector w_vec, double rcut, double cospatch,
        double intersections[5])
{
    int i, intrs;
    double a, c, d, ti, disti;
    struct vector nplane, d_vec;
    void normalise(struct vector *);
    struct vector vec_crossproduct(struct vector, struct vector);

    /* plane normal = sc1 axis x patch-side vector */
    nplane=vec_crossproduct(part1->dir,w_vec);
    normalise(&nplane);
    normalise(&w_vec);
    a = DOT(nplane, part2->dir);
    if (a == 0.0) intrs=0; /* there is no intersection plane and sc are paralel*/
    else {
        /* ti: parameter along sc2's axis where it crosses the plane */
        ti =
DOT(nplane,r_cm)/a;
        if ((ti > halfl2 ) || (ti < -halfl2)) intrs=0; /* there is no intersection plane sc is too short*/
        else {
            d_vec.x = ti * part2->dir.x - r_cm.x; /*vector from intersection point to CM*/
            d_vec.y = ti * part2->dir.y - r_cm.y;
            d_vec.z = ti * part2->dir.z - r_cm.z;
            c = DOT (d_vec, w_vec);
            if ( c * cospatch < 0) intrs=0; /* the intersection in plane is on other side of patch */
            else {
                /* NOTE(review): the distance along part1's axis is compared
                   against halfl2 (sc2's half length); confirm this is the
                   intended behaviour for particles of unequal length. */
                d = fabs(DOT (d_vec, part1->dir)) - halfl2;
                if (d <= 0) disti = c*c; /*is inside cylinder*/
                else disti = d*d + c*c; /*is inside patch*/
                if (disti > rcut*rcut) intrs=0; /* the intersection is outside sc */
                else {
                    intrs=1;
                    i=0;
                    while (intersections[i] !=0) {
                        if (ti == intersections[i]) intrs=0; /* found intersection we already have -it is at boundary*/
                        i++;
                    }
                    if (intrs > 0) {
                        intersections[i]=ti;
                    }
                }
            }
        }
    }
    return intrs;
}

/*CPSC................................................................................*/

/* Calculate intersections of sc2 with a patch of sc1 and return them in this works for cylindrical psc -CPSC */
/* Same contract as psc_intersect, but the spherocylinder ends are flat
   plates (cylindrical PSC) instead of spherical caps. */
int cpsc_intersect(struct particles * part1, struct particles * part2,
        double halfl1, double halfl2, struct vector r_cm,
        double intersections[5], double rcut, struct ia_param * param,
        int which, int patchnum)
{
    int intrs;
    double a, b, c, d, e, x1, x2, rcut2;
    struct vector cm21, vec1, vec2, vec3, vec4;
    struct vector vec_crossproduct(struct vector, struct vector);
    struct vector vec_sub(struct vector, struct vector);
    struct vector vec_create(double, double, double);
    struct vector vec_scale(struct vector, double);
    struct vector vec_perpproject(struct vector*, struct vector*);
    struct quat quat_create(struct vector, double, double);
    void vec_rotate(struct vector *, struct quat);
    void normalise(struct vector *);
    int find_intersect_planec(struct particles *, struct particles *, double,
            struct vector, struct vector, double, double, double *);
    int test_intrpatch(struct particles *, struct vector, double, double, double *, int);

    intrs=0;
    rcut2=rcut*rcut;
    /*1- do intersections of spherocylinder2 with patch of spherocylinder1 at cut distance C*/
    /*1a- test intersection with half planes of patch and look how far they are from spherocylinder. If closer then C we got itersection*/

    /* plane1 */
    /* find intersections of part2 with plane by par1 and part1->patchsides[0] */
    intrs+=find_intersect_planec(part1,part2,halfl2,r_cm,part1->patchsides[0+2*patchnum],rcut,param->pcanglsw[which+2*patchnum],intersections);
    // printf("plane1 %d\n", intrs);

    /* plane2 */
    /* find intersections of part2 with plane by par1 and part1->patchsides[1] */
    intrs+=find_intersect_planec(part1,part2,halfl2,r_cm,part1->patchsides[1+2*patchnum],rcut,param->pcanglsw[which+2*patchnum],intersections);

    if ( (intrs == 2 ) && (param->pcanglsw[which+2*patchnum] < 0) ) {
        fprintf (stderr, "ERROR: Patch is larger than 180 degrees and we are getting two segments - this hasnot been programed yet.\n\n");
        exit (1);
    }
    // printf("plane2 %d\n", intrs);

    /*1b- test intersection with cylinder - it is at distance C*/
    if (intrs < 2 ) {
        cm21=vec_scale(r_cm,-1.0);
        vec1=vec_crossproduct(cm21,part1->dir);
        vec2=vec_crossproduct(part2->dir,part1->dir);
        a = DOT(vec2,vec2);
        b = 2*DOT(vec1,vec2);
        c = -rcut*rcut + DOT(vec1,vec1);
        d = b*b - 4*a*c;
        if ( d >= 0) { /*there is intersection with infinite cylinder */
            x1 = (-b+sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/
            if ((x1 >=halfl2) || (x1 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/
            else {
                /* vectors from center os sc1 to intersection with infinite cylinder*/
                vec1.x=part2->dir.x*x1-r_cm.x;
                vec1.y=part2->dir.y*x1-r_cm.y;
                vec1.z=part2->dir.z*x1-r_cm.z;
                e = DOT(part1->dir,vec1);
                if ((e >=halfl1) || (e <= -halfl1)) intrs+=0; /*intersection is outside sc1*/
                else {
                    intrs+=test_intrpatch(part1,vec1,param->pcanglsw[which+2*patchnum],x1,intersections,patchnum);
                }
            }
            if ( d > 0 ){
                x2 = (-b-sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/
                if ((x2 >=halfl2) || (x2 <= -halfl2)) intrs+=0;
                /*intersection is outside sc2*/
                else {
                    vec2.x = part2->dir.x*x2-r_cm.x;
                    vec2.y = part2->dir.y*x2-r_cm.y;
                    vec2.z = part2->dir.z*x2-r_cm.z;
                    e = DOT(part1->dir,vec2);
                    if ((e >=halfl1) || (e <= -halfl1)) intrs+=0; /*intersection is outside sc1*/
                    else {
                        intrs+=test_intrpatch(part1,vec2,param->pcanglsw[which+2*patchnum],x2,intersections,patchnum);
                    }
                }
            }
        }
    }
    // printf ("cylinder %d x1 %f x2 %f e %f\n", intrs, x1, x2, e);

    /*1c- test intersection with plates at the end - it is at distace C and in wedge*/
    if (intrs < 2 ) {
        a = DOT(part1->dir, part2->dir);
        if (a == 0.0) intrs=0; /* there is no intersection plane and sc are paralel*/
        else {
            /*plane cap1*/
            vec1.x= r_cm.x + halfl1*part1->dir.x;
            vec1.y= r_cm.y + halfl1*part1->dir.y;
            vec1.z= r_cm.z + halfl1*part1->dir.z;
            x1 = DOT(part1->dir,vec1)/a; /*parameter on line of SC2 determining intersection*/
            if ((x1 > halfl2 ) || (x1 < -halfl2)) intrs+=0; /* there is no intersection plane sc is too short*/
            else {
                vec2.x = x1*part2->dir.x - vec1.x; /*vector from ENDPOINT to intersection point */
                vec2.y = x1*part2->dir.y - vec1.y;
                vec2.z = x1*part2->dir.z - vec1.z;
                b = DOT (vec2, vec2);
                if (b > rcut*rcut) intrs+=0; /* the intersection is outside sc */
                else {
                    intrs+=test_intrpatch(part1,vec2,param->pcanglsw[which+2*patchnum],x1,intersections,patchnum);
                }
            }
            // printf ("plane cap1 %d %f\n", intrs, x1);

            /*plane cap2*/
            vec1.x= r_cm.x - halfl1*part1->dir.x;
            vec1.y= r_cm.y - halfl1*part1->dir.y;
            vec1.z= r_cm.z - halfl1*part1->dir.z;
            x2 = DOT(part1->dir,vec1)/a; /*parameter on line of SC2 determining intersection*/
            if ((x2 > halfl2 ) || (x2 < -halfl2)) intrs+=0; /* there is no intersection plane sc is too short*/
            else {
                vec2.x = x2*part2->dir.x - vec1.x; /*vector from ENDPOINT to intersection point */
                vec2.y = x2*part2->dir.y - vec1.y;
                vec2.z = x2*part2->dir.z - vec1.z;
                b = DOT (vec2, vec2);
                if (b > rcut*rcut) intrs+=0; /* the intersection is outside sc */
                else {
                    intrs+=test_intrpatch(part1,vec2,param->pcanglsw[which+2*patchnum],x2,intersections,patchnum);
                }
            }
            // printf ("plane cap2 %d %f\n", intrs,x2);
        }
    }

    /*1d- if there is only one itersection shperocylinder ends within patch wedge set as second intersection end inside patch*/
    if (intrs < 2 ) {
        /*whole spherocylinder is in or all out if intrs ==0*/
        vec1.x = part2->dir.x*halfl2 - r_cm.x;
        vec1.y = part2->dir.y*halfl2 - r_cm.y;
        vec1.z = part2->dir.z*halfl2 - r_cm.z;
        /*vector from CM of sc1 to end of sc2*/
        /*check is is inside sc1*/
        a=DOT(vec1,part1->dir);
        vec3.x = vec1.x - part1->dir.x*a;
        vec3.y = vec1.y - part1->dir.y*a;
        vec3.z = vec1.z - part1->dir.z*a;
        b=DOT(vec3,vec3);
        d = fabs(a)-halfl1;
        if ( d <= 0) { /*is in cylindrical part*/
            /*c is distance squared from line or end to test if is inside sc*/
            if (b < rcut2)
                intrs+=test_intrpatch(part1,vec1,param->pcanglsw[which+2*patchnum],halfl2,intersections,patchnum);
        }
        if (intrs < 2 ) {
            /* opposite end of sc2 */
            vec2.x = -part2->dir.x*halfl2 - r_cm.x;
            vec2.y = -part2->dir.y*halfl2 - r_cm.y;
            vec2.z = -part2->dir.z*halfl2 - r_cm.z;
            /*check is is inside sc1*/
            a=DOT(vec2,part1->dir);
            vec4.x = vec2.x - part1->dir.x*a;
            vec4.y = vec2.y - part1->dir.y*a;
            vec4.z = vec2.z - part1->dir.z*a;
            b=DOT(vec4,vec4);
            d = fabs(a) -halfl1;
            if (d <= 0) {
                /*c is distance squared from line or end to test if is inside sc*/
                if (b < rcut2)
                    intrs+=test_intrpatch(part1,vec2,param->pcanglsw[which+2*patchnum],-1.0*halfl2,intersections,patchnum);
            }
        }
        // printf ("ends %d\n", intrs);
    }

    return intrs;
}

/*CPSC................................................................................*/

/* Find intersections of plane defined by vector w_vec.and returns number of them - for cylindrical psc -CPSC */
/* Like find_intersect_plane, but an intersection beyond the flat end plates
   (d > 0) is discarded rather than measured against the end caps. */
int find_intersect_planec(struct particles * part1, struct particles * part2, double halfl,
        struct vector r_cm, struct vector w_vec, double rcut, double cospatch,
        double intersections[5])
{
    int i, intrs=0;
    double a, c, d, ti, disti;
    struct vector nplane, d_vec;
    void normalise(struct vector *);
struct vector vec_crossproduct(struct vector, struct vector);

    nplane=vec_crossproduct(part1->dir,w_vec);
    normalise(&nplane);
    normalise(&w_vec);
    a = DOT(nplane, part2->dir);
    if (a == 0.0) intrs=0; /* there is no intersection plane and sc are paralel*/
    else {
        ti = DOT(nplane,r_cm)/a;
        if ((ti > halfl ) || (ti < -halfl)) intrs=0; /* there is no intersection plane sc is too short*/
        else {
            d_vec.x = ti*part2->dir.x - r_cm.x; /*vector from intersection point to CM*/
            d_vec.y = ti*part2->dir.y - r_cm.y;
            d_vec.z = ti*part2->dir.z - r_cm.z;
            c = DOT (d_vec, w_vec);
            if ( c *cospatch < 0) intrs=0; /* the intersection in plane is on other side of patch */
            else {
                d = fabs(DOT (d_vec, part1->dir)) - halfl;
                if (d <= 0) {
                    disti= c*c; /*is inside cylinder*/
                    if (disti > rcut*rcut) intrs=0; /* the intersection is outside sc */
                    else {
                        intrs=1;
                        i=0;
                        while (intersections[i] !=0) {
                            if (ti == intersections[i]) intrs=0; /* found intersection we already have -it is at boundary*/
                            i++;
                        }
                        if (intrs > 0) intersections[i]=ti;
                    }
                }
            }
        }
    }
    return intrs;
}

/*..................................................................................*/

/* Find projection of cpsc on plane (0,0,1) including cutoff and return vector to its begining and end and cm */
/* Projects a spherical-cap spherocylinder onto the z=0 wall along
   projectdir, clipping the segment to the interaction cutoff (*cutdist).
   Outputs pbeg/pend (projected segment end points).
   Returns 0 = no interacting segment, 1 = segment found,
   2 = end lies on the begin-sphere (no cylindrical cutoff contribution). */
int psc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir,
        struct vector* partdir, BOOL* positive, double * cutdist,
        struct vector *partbeg, struct vector *partend)
{
    struct vector vec1;
    double k,x1,x2,y1,y2,a,b,c,e,d;
    void projectinz( struct vector* vec1, struct vector * projectdir, struct vector* result);
    void normalise(struct vector*);

    /* projection points away from the wall -> no interaction */
    if (( (*positive)&& (projectdir->z > 0) ) || ( (!(*positive))&& (projectdir->z < 0) )) return 0;
    if ( fabs(partbeg->z) > (*cutdist) ) return 0;
    /* we might have interacting segment*/
    x2 = 0.0;
    y2 = 0.0;
    /*begining point*/
    /*if begining projected along particle direction is within cutoff */
    if (fabs(partdir->z) > ZEROTOL2) {
        projectinz(partbeg,partdir,pbeg);
        a=0;
    } else {
        /*we need some starting point*/
        vec1.x = 2.0*partbeg->x - partend->x;
        vec1.y = 2.0*partbeg->y - partend->y;
        vec1.z = 2.0*partbeg->z - partend->z;
        projectinz(&vec1,projectdir,pbeg);
        a=1;
    }
    if (partdir->z != 0) {
        b = fabs(partbeg->z / partdir->z);
    } else {
        b = (*cutdist)+1.0;
    }
    if ( (b > (*cutdist)) || (a==1)) {
        /*else beginig is at sphere, find intersections with sphere of cutoff radius*/
        if ( fabs(projectdir->z) > ZEROTOL2) {
            projectinz(partbeg,projectdir,pend);
        } else {
            pend->x = pbeg->x + projectdir->x;
            pend->y = pbeg->y + projectdir->y;
        }
        if (pend->y == pbeg->y) {
            /* projected line is parallel to x: solve for x directly */
            y1=pbeg->y;
            y2=pbeg->y;
            a=sqrt( (*cutdist)*(*cutdist) - partbeg->z*partbeg->z - (pbeg->y-partbeg->y)*(pbeg->y-partbeg->y) );
            x1 = partbeg->x + a;
            x2 = partbeg->x - a;
            if (pend->x > pbeg->x) {/*select the right intersection*/
                pbeg->x = x2;
                x2 = x1;
            } else {
                pbeg->x = x1;
            }
            pbeg->y = y1;
        } else {
            /* general line/circle intersection in the wall plane */
            k = (pend->x - pbeg->x)/ (pend->y - pbeg->y);
            a = k*k +1;
            b = partbeg->y + k*k*pbeg->y - k*pbeg->x + k*partbeg->x;
            c = partbeg->y*partbeg->y + partbeg->z*partbeg->z - (*cutdist)*(*cutdist) + (k*pbeg->y - pbeg->x + partbeg->x)*(k*pbeg->y - pbeg->x + partbeg->x);
            e = b*b-a*c;
            if (e < 0) {
                return 0; /*tehre might be no intersection with sphere*/
            }
            d = sqrt(e);
            if (pend->y > pbeg->y) {/*select the right intersection*/
                y1 = (b - d ) /a;
                y2 = (b + d ) /a;
            } else {
                y1 = (b + d ) /a;
                y2 = (b - d ) /a;
            }
            x1 = k * (y1 - pbeg->y) + pbeg->x;
            x2 = k * (y2 - pbeg->y) + pbeg->x;
            pbeg->x = x1;
            pbeg->y = y1;
            pbeg->z = 0.0;
        }
    }
    //printf("pscwall beg %f %f \n",pbeg->x,pbeg->y);

    /*end point*/
    a = -(*cutdist) * projectdir->z; /*z coordinate of point where projection is in cut distance*/
    //printf("sphere end %f %f ",a,partend->z);
    if ( ((partend->z < a)&&(*positive)) || ((a < partend->z)&&(!(*positive))) ){
        /*end is within cut off - second sphere*/
        /*if this is the case vec1 is end of pherocylinder and pend is its projection*/
        if (projectdir->z != 0) {
            projectinz(partend,projectdir,pend);
        } else {
            pend->x = pbeg->x + projectdir->x;
            pend->y = pbeg->y + projectdir->y;
        }
        if (pend->y == pbeg->y) {
            y1=pend->y;
            y2=pend->y;
            a=sqrt( (*cutdist)*(*cutdist) - partend->z*partend->z - (pend->y-partend->y)*(pend->y-partend->y) );
            x1 = partend->x + a;
            x2 = partend->x - a;
            if (pbeg->x > pend->x) {/*select the right intersection*/
                pend->x = x2;
            } else {
                pend->x = x1;
            }
            pend->y = y1;
        } else {
            k = (pbeg->x - pend->x)/ (pbeg->y - pend->y);
            a = k*k +1;
            b = partend->y + k*k*pend->y - k*pend->x + k*partend->x;
            c = partend->y*partend->y + partend->z*partend->z - (*cutdist)*(*cutdist) + (k*pend->y - pend->x + partend->x)*(k*pend->y - pend->x + partend->x);
            e = b*b-a*c;
            if (e < 0) {
                return 0; /*there might be no intersection with sphere*/
            }
            d = sqrt(e);
            if (pbeg->y > pend->y) {/*select the right intersection*/
                y1 = (b - d ) /a;
                y2 = (b + d ) /a;
            } else {
                y1 = (b + d ) /a;
                y2 = (b - d ) /a;
            }
            x1 = k * (y1 - pend->y) + pend->x;
            x2 = k * (y2 - pend->y) + pend->x;
            pend->x = x1;
            pend->y = y1;
            pend->z = 0.0;
        }
    } else {
        if ( ((partbeg->z < a)&&(*positive)) || ((a < partbeg->z)&&(!(*positive))) ) {
            /*end is at cutoff going through cylindrical part*/
            //printf("cylinder ");
            b = (a - partbeg->z)/ partdir->z;
            vec1.x = partbeg->x + b * partdir->x;
            vec1.y = partbeg->y + b * partdir->y;
            vec1.z = a;
            projectinz(&vec1,projectdir,pend);
        } else {
            /* also projected end is within the same sphere as begining- no contribution from cylinder*/
            if (x2 == 0.0 ) {
                //printf("sphere beg ");
                if (projectdir->z != 0) {
                    projectinz(partbeg,projectdir,pend);
                } else {
                    pend->x = pbeg->x + projectdir->x;
                    pend->y = pbeg->y + projectdir->y;
                }
                if (pend->y == pbeg->y) {
                    y1=pbeg->y;
                    y2=pbeg->y;
                    a=sqrt( (*cutdist)*(*cutdist) - partbeg->z*partbeg->z - (pbeg->y-partbeg->y)*(pbeg->y-partbeg->y) );
                    x1 = partbeg->x + a;
                    x2 = partbeg->x - a;
                    if (pend->x > pbeg->x) {/*select the right intersection*/
                        pend->x = x1;
                    } else {
                        pend->x = x2;
                    }
                    pend->y = y1;
                } else {
                    k = (pend->x - pbeg->x)/ (pend->y - pbeg->y);
                    a = k*k +1;
                    b = partbeg->y + k*k*pbeg->y - k*pbeg->x + k*partbeg->x;
                    c = partbeg->y*partbeg->y + partbeg->z*partbeg->z - (*cutdist)*(*cutdist) + (k*pbeg->y - pbeg->x + partbeg->x)*(k*pbeg->y - pbeg->x + partbeg->x);
                    e = b*b-a*c;
                    if (e < 0) {
                        return 0; /*tehre might be no intersection with sphere*/
                    }
                    d = sqrt(e);
                    if (pend->y > pbeg->y) {/*select the right intersection*/
                        y1 = (b - d ) /a;
                        y2 = (b + d ) /a;
                    } else {
                        y1 = (b + d ) /a;
                        y2 = (b - d ) /a;
                    }
                    x1 = k * (y1 - pbeg->y) + pbeg->x;
                    x2 = k * (y2 - pbeg->y) + pbeg->x;
                    pend->x = x1;
                    pend->y = y1;
                    pend->z = 0.0;
                }
            } else {
                pend->x = x2;
                pend->y = y2;
                pend->z = 0.0;
            }
            return 2; /*line end is on sphere of particle begining = no cylindrical cutoff*/
        }
    }

    return 1;
}

/* Projection of a cylindrical PSC on the z=0 wall, clipped to the cutoff.
   Returns 0 = no interacting segment, 1 = pbeg/pend set. */
int cpsc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir,
        struct vector* partdir, double* halfl, BOOL* orientin, BOOL* positive,
        double* rcmz, double * cutdist, struct vector *partbeg, struct vector *partend)
{
    struct vector vec1;
    double a;
    void projectinz( struct vector* vec1, struct vector * projectdir, struct vector* result);

    if (( (*positive)&& (projectdir->z >= 0) ) || ( (!(*positive))&& (projectdir->z <= 0) )) return 0;
    /*if projected closer point beoynd cutoff no interaction*/
    /*project begining of spherocylinder*/
    vec1.x = partbeg->x;
    vec1.y = partbeg->y;
    vec1.z = partbeg->z;
    if (-vec1.z/projectdir->z < (*cutdist) ) {
        projectinz(&vec1,projectdir,pbeg);
    } else {
        return 0;
    }
    /* we have interacting segment*/
    if (-partend->z/projectdir->z < (*cutdist) ) {
        /*whole segment interacts*/
        vec1.z = partend->z;
    } else {
        /* clip the far end at the cutoff distance */
        vec1.z = -(*cutdist)*projectdir->z;
    }
    if (partdir->z != 0.0) a = (vec1.z - (*rcmz)) / partdir->z;
    else {
        if (*orientin) a = -(*halfl);
        else a = (*halfl);
    }
    vec1.x = partdir->x * a;
    vec1.y = partdir->y * a;
    projectinz(&vec1,projectdir,pend);

    return 1;
}

int cutprojectatwall(struct vector* pextr1, struct vector* pextr2, struct vector* pextr3,
        struct vector* pextr4, struct vector* projectdir, struct vector* partdir,
        double * cutdist, struct vector *partbeg, struct vector *partend, struct vector
*pend, double *cuttoproject, BOOL* orientin) { double y1,y2,O2z,det,a,b,dirydirz,dir2x,dir2y,dir2z,dirzldiry; void projectinz( struct vector* vec1, struct vector * projectdir, struct vector* result); dirydirz = partdir->y * partdir->z; dir2x = partdir->x * partdir->x; dir2y = partdir->y * partdir->y; dir2z = partdir->z * partdir->z; a = 1/(dir2x+dir2y); if (partdir->x != 0) { O2z = partbeg->z * partbeg->z; b=dir2y*dir2z*O2z - (dir2x+dir2y) * (O2z*(dir2x+dir2z)- (*cutdist)*(*cutdist)*dir2x); if (b < 0 ) { /*no cutoff from cylindrical part*/ return 0; } det = sqrt(b); y1 = partbeg->y + (dirydirz*partbeg->z + det )*a; y2 = partbeg->y + (dirydirz*partbeg->z - det )*a; if (( (partdir->x > 0)&&(!(*orientin)) ) || ( (partdir->x < 0)&&(*orientin) )) { pextr1->y = y1; pextr2->y = y2; } else { pextr1->y = y2; pextr2->y = y1; } pextr1->x = partbeg->x + (partbeg->z*partdir->z - (pextr1->y - partbeg->y)*partdir->y) / partdir->x; pextr2->x = partbeg->x + (partbeg->z*partdir->z - (pextr2->y - partbeg->y)*partdir->y) / partdir->x; O2z = partend->z * partend->z; b= dir2y*dir2z*O2z - (dir2x+dir2y) * (O2z*(dir2x+dir2z)- (*cutdist)*(*cutdist)*dir2x); if (b >= 0) { /*we have intersections from end*/ det = sqrt(b); y1 = partend->y + (dirydirz * partend->z + det )*a; y2 = partend->y + (dirydirz * partend->z - det )*a; //printf("det %f y1 %f y2 %f \n", det,y1,y2); if (( (partdir->x > 0)&&(!(*orientin)) ) || ( (partdir->x < 0)&&(*orientin) )) { pextr3->y = y1; pextr4->y = y2; } else { pextr3->y = y2; pextr4->y = y1; } pextr3->x = partend->x + (partend->z*partdir->z - (pextr3->y - partend->y)*partdir->y) / partdir->x; pextr4->x = partend->x + (partend->z*partdir->z - (pextr4->y - partend->y)*partdir->y) / partdir->x; } else { /*no intersection at the end the cutoff intersects the plane in the perpendicular projection of line segemnt, so we have to use that point */ if (partdir->z == 0) { fprintf (stderr, "\nERROR: Something went wrong in calculation of projection.\n\n"); exit (1); } else { 
a = ((*cuttoproject) - partbeg->z)/ partdir->z; //if ( projectdir->y * partdir->x < 0 ) pextr3->x = partbeg->x + a * partdir->x; pextr3->y = partbeg->y + a * partdir->y; pextr3->z = (*cuttoproject); //printf("before proj %f %f dir %f %f %f ",pextr3->x,pextr3->y,projectdir->x,projectdir->y,projectdir->z); projectinz(pextr3,projectdir,pextr4); pextr3->x = pextr4->x; pextr3->y = pextr4->y; pextr3->z = 0.0; //printf("after proj %f %f \n",pextr3->x,pextr3->y); return 2; } } } else { if (partdir->y != 0) { dirzldiry = partdir->z/partdir->y; y1 = partbeg->y + partbeg->z * dirzldiry; det = sqrt( (*cutdist)*(*cutdist) - partbeg->z * partbeg->z * (1+dirzldiry*dirzldiry) ); if (( (partdir->y > 0)&&(!(*orientin)) ) || ( (partdir->y < 0)&&(*orientin) )) { pextr1->x = partbeg->x + det; pextr2->x = partbeg->x - det; } else { pextr1->x = partbeg->x - det; pextr2->x = partbeg->x + det; } pextr1->y = y1; pextr2->y = y1; y1 = partend->y + partend->z * dirzldiry; b = (*cutdist)*(*cutdist) - partend->z * partend->z * (1+dirzldiry*dirzldiry); if (b >= 0) { /*we have intersections from end*/ det = sqrt(b); if (( (partdir->y > 0)&&(!(*orientin)) ) || ( (partdir->y < 0)&&(*orientin) )) { pextr3->x = partend->x + det; pextr4->x = partend->x - det; } else { pextr3->x = partend->x - det; pextr4->x = partend->x + det; } pextr3->y = y1; pextr4->y = y1; } else { /*no intersection at the end the cutoff intersects the plane in the perpendicular projection of line segemnt, so we have to use that point */ if (partdir->z == 0) { fprintf (stderr, "\nERROR: Something went wrong in calculation of projection.\n\n"); exit (1); } else { a = ((*cutdist) - partbeg->z)/ partdir->z; y1 = a * partdir->y + partbeg->y; if ( projectdir->x * partdir->y > 0 ) { pextr3->x = a * partdir->x + partbeg->x; pextr3->y = y1; pextr4->x = pend->x; pextr4->y = pend->y; }else { pextr3->x = pend->x; pextr3->y = pend->y; pextr4->x = a * partdir->x + partbeg->x; pextr4->y = y1; } } } } else { return 0; /* if perpendicular to plane 
we don't have any intersections*/ } } return 1; }

/* Project the point vec1 along direction projectdir onto the plane z=0.
   NOTE(review): divides by projectdir->z — callers must guarantee the
   projection direction is not parallel to the wall (projectdir->z != 0). */
void projectinz(struct vector* vec1, struct vector* projectdir,struct vector * projection)
{
    projection->x = vec1->x - vec1->z * projectdir->x/projectdir->z;
    projection->y = vec1->y - vec1->z * projectdir->y/projectdir->z;
    projection->z = 0;
}

/*calculates area defined by four points in z=0 plane */
/* The quadrilateral (pbeg, pbeg1, pend1, pend) is split into two triangles
   and their (unsigned) cross-product areas are summed. Only x and y are used;
   all points are assumed to lie in the z=0 plane. */
double areafourpoints(struct vector * pbeg, struct vector * pend, struct vector * pbeg1, struct vector * pend1 )
{
    double area =0.0;
    struct vector vec1,vec2;

    /*area by four points... two half vector cross product
      |(pbegining1-pbegining)x(pend-pbegining)|/2 */
    vec1.x = pbeg1->x - pbeg->x;
    vec1.y = pbeg1->y - pbeg->y;
    vec2.x = pend->x - pbeg->x;
    vec2.y = pend->y - pbeg->y;
    //printf("a: %f %f %f %f \n",vec1.x,vec2.y,vec1.y,vec2.x);
    area += fabs(vec1.x*vec2.y - vec1.y*vec2.x)*0.5;

    /* + |(pend-pend1)x(pbegining1-pend1)|/2*/
    vec1.x = pend->x - pend1->x;
    vec1.y = pend->y - pend1->y;
    vec2.x = pbeg1->x - pend1->x;
    vec2.y = pbeg1->y - pend1->y;
    area += fabs(vec1.x*vec2.y -vec1.y*vec2.x)*0.5;

    return area;
}

/*calculates area defined by six points in z=0 plane */
/* Fan triangulation from p1 over up to eight vertices (p4..p8 may be NULL to
   stop early); sums unsigned triangle areas in the z=0 plane.
   NOTE(review): the comment above says "six points" but the function accepts
   eight — presumably the comment is stale; confirm against callers. */
double areaeightpoints(struct vector * p1, struct vector * p2, struct vector * p3, struct vector * p4, struct vector * p5, struct vector * p6,struct vector * p7, struct vector * p8)
{
    double area =0.0;
    struct vector vec1,vec2;

    /*area by half vector cross product |(pbegining-pbegining)x(pend-pbegining)|/2 */
    vec1.x = p2->x - p1->x;
    vec1.y = p2->y - p1->y;
    vec2.x = p3->x - p2->x;
    vec2.y = p3->y - p2->y;
    area += fabs(vec1.x*vec2.y -vec1.y*vec2.x)*0.5;
    //printf("3");
    if (p4 != NULL) {
        vec1.x = p3->x - p1->x;
        vec1.y = p3->y - p1->y;
        vec2.x = p4->x - p3->x;
        vec2.y = p4->y - p3->y;
        area += fabs(vec1.x*vec2.y -vec1.y*vec2.x)*0.5;
        //printf("4");
        if (p5 != NULL) {
            vec1.x = p4->x - p1->x;
            vec1.y = p4->y - p1->y;
            vec2.x = p5->x - p4->x;
            vec2.y = p5->y - p4->y;
            area += fabs(vec1.x*vec2.y -vec1.y*vec2.x)*0.5;
            //printf("5");
            if (p6 != NULL) {
                vec1.x = p5->x - p1->x;
                vec1.y = p5->y - p1->y;
                vec2.x = p6->x - p5->x;
                vec2.y = p6->y - p5->y;
                area += fabs(vec1.x*vec2.y -vec1.y*vec2.x)*0.5;
                //printf("6");
                if (p7 != NULL) {
                    vec1.x = p6->x - p1->x;
                    vec1.y = p6->y - p1->y;
                    vec2.x = p7->x - p6->x;
                    vec2.y = p7->y - p6->y;
                    area += fabs(vec1.x*vec2.y -vec1.y*vec2.x)*0.5;
                    //printf("7");
                    if (p8 != NULL) {
                        vec1.x = p7->x - p1->x;
                        vec1.y = p7->y - p1->y;
                        vec2.x = p8->x - p7->x;
                        vec2.y = p8->y - p7->y;
                        area += fabs(vec1.x*vec2.y -vec1.y*vec2.x)*0.5;
                        //printf("8");
                    }
                }
            }
        }
    }

    return area;
}

/*..............................................................................*/
/*........................INPUT OUTPUT..........................................*/
/*..............................................................................*/
/*..............................................................................*/

/**
 * convert string num into two integers
 * Parses the first integer from num, trims the remainder, and parses a second
 * integer from it if anything is left (otherwise value[1] = 0). Exits on a
 * trailing non-numeric remainder.
 */
void readii(char * num, int value[2]){
    char *end, *num2;
    void trim (char *);

    value[0] = strtol(num, &num2, 10);
    trim(num2);
    if ((int)strlen(num2) > 0)
        value[1] = strtol(num2, &end, 10);
    else {
        value[1] =0;
        return;
    }
    if(*end){
        fprintf(stderr, "Could not convert %s into two integers\n", num);
        exit(1);
    }

    return;
}

/**
 * convert string num into integer
 * Exits the program if the string has trailing non-numeric characters.
 */
int readi(char * num){
    char *end;
    int i = strtol(num, &end, 10);
    if(*end){
        fprintf(stderr, "Could not convert %s into integer\n", num);
        exit(1);
    }
    return (int) i;
}

/**
 * convert string num into long
 * Exits the program if the string has trailing non-numeric characters.
 */
long readl(char * num){
    char *end;
    long i = strtol(num, &end, 10);
    if(*end){
        fprintf(stderr, "Could not convert %s into long\n", num);
        exit(1);
    }
    return i;
}

/**
 * convert string num into double
 * Exits the program if the string has trailing non-numeric characters.
 */
double readd(char * num){
    char *end;
    double i = strtod(num, &end);
    if(*end){
        fprintf(stderr, "Could not convert %s into double\n", num);
        exit(1);
    }
    return i;
}

/* Reads the run parameters from the external file "options". See the end of
   the code for a template. All comments starting with '#' are stripped out.
The options are summarised on standard output and checked for validity of range. */ void read_options(struct sim* sim,char filename[30]) { int i; int num_options = -1; double transmx, rotmx, chainmmx, chainrmx; double angle, chain_angle; char *id, *value, *tokLine, *line; FILE *infile; void strip_comment (char *); void trim (char *); void readii(char * num, int value[2]); double readd(char *); long readl(char *); int readi(char *); /* for new options add before the last line */ Option options[] = { {"write_cluster", Long, FALSE, &sim->write_cluster}, {"adjust", Long, FALSE, &sim->adjust}, {"movie", Long, FALSE, &sim->movie}, {"nequil", Long, FALSE, &sim->nequil}, {"nsweeps", Long, FALSE, &sim->nsweeps}, {"nrepchange", Long, FALSE, &sim->nrepchange}, {"paramfrq", Long, FALSE, &sim->paramfrq}, {"report", Long, FALSE, &sim->report}, {"seed", Long, FALSE, &seed}, {"pairlist_update", Int, FALSE, &sim->pairlist_update}, {"ptype", Int, FALSE, &sim->ptype}, {"wlm", Int2, FALSE, &sim->wlm}, {"wlmtype", Int, FALSE, &sim->wl.wlmtype}, {"press", Double, FALSE, &sim->press}, {"paralpress", Double, FALSE, &sim->paralpress}, {"edge_mx", Double, FALSE, &sim->edge.mx}, {"shave", Double, FALSE, &sim->shave}, {"chainprob", Double, FALSE, &sim->chainprob}, {"switchprob", Double, FALSE, &sim->switchprob}, {"temper", Double, FALSE, &sim->temper}, {"paraltemper", Double, FALSE, &sim->paraltemper}, {"transmx", Double, FALSE, &transmx}, {"rotmx", Double, FALSE, &rotmx}, {"chainmmx", Double, FALSE, &chainmmx}, {"chainrmx", Double, FALSE, &chainrmx}, {"last", Int, FALSE, NULL} }; while(options[++num_options].var != NULL) ; /*--- 1. 
Read in values ---*/ size_t line_size = (STRLEN + 1) * sizeof(char); line = (char *) malloc(line_size); infile = fopen(filename, "r"); if (infile == NULL) { fprintf (stderr, "\nERROR: Could not open options file.\n\n"); exit (1); } while(getline(&line, &line_size, infile) != -1){ // strip comments strip_comment(line); trim(line); if(strlen(line) == 0){ continue; } // tokenize tokLine = line; id = strtok(tokLine, "="); if(id == NULL){ fprintf(stderr, "error parsing Configuration line (%s)", line); free(line); exit(1); } trim(id); tokLine = NULL; value = strtok(tokLine, "="); trim(value); if(value == NULL){ fprintf(stderr, "error parsing Configuration line (%s)", line); free(line); exit(1); } //printf("id: %s; value: %s\n", id, value); for(i = 0; i < num_options; i++){ if(strcmp(id, options[i].id) == 0){ if(options[i].type == Int2){ readii(value,*((int (*)[2]) options[i].var)); options[i].set = TRUE; break; } if(options[i].type == Int){ *((int *) options[i].var) = readi(value); options[i].set = TRUE; break; } else if(options[i].type == Long){ *((long *) options[i].var) = readl(value); options[i].set = TRUE; break; } else if(options[i].type == Double){ *((double *) options[i].var) = readd(value); options[i].set = TRUE; break; } else { fprintf(stderr, "Could not determine type of %s!\n", id); free(line); exit(1); } } } if(i == num_options){ fprintf(stderr, "Unknown identifier %s!\nWill procede.\n", id); } } fclose (infile); free(line); /* Check, wheter all options have been readin */ for(i = 0; i < num_options; i++){ if(!options[i].set){ fprintf(stderr, "option '%s' is not set!\n", options[i].id); exit(1); } } /*--- 2. 
Summarize results on standard output ---*/ /* Density of close-packed spherocylinders */ // rho_cp = 2.0/(sqrt(2.0) + *length * sqrt(3.0)); printf (" Pressure coupling type: %d\n", sim->ptype); printf (" Pressure: %.8lf\n", sim->press); printf (" Replica exchange pressure: %.8lf\n", sim->paralpress); printf (" Average volume change attempts per sweep: %.8lf\n", sim->shave); printf (" Equilibration sweeps: %ld\n", sim->nequil); printf (" Sweeps between step size adjustments: %ld\n", sim->adjust); printf (" Production sweeps: %ld\n", sim->nsweeps); printf (" Sweeps between statistics samples: %ld\n", sim->paramfrq); printf (" Sweeps between statistics reports: %ld\n", sim->report); printf (" Average chain move attempts per sweep: %.8lf\n", sim->chainprob); printf (" Initial maximum displacement: %.8lf\n", transmx); printf (" Inititial maximum angular change (degrees): %.8lf\n", rotmx); printf (" Inititial maximum box edge change: %.8lf\n", sim->edge.mx); printf (" Initial maximum chain displacement: %.8lf\n", chainmmx); printf (" Inititial maximum chain angular change (degrees): %.8lf\n", chainrmx); printf (" Temperature in kT/e: %.8lf\n", sim->temper); printf (" Parallel tempering temperature in kT/e: %.8lf\n", sim->paraltemper); printf (" Sweeps between replica exchange: %ld\n", sim->nrepchange); printf (" Wang-Landau method: %d %d\n", sim->wlm[0],sim->wlm[1]); printf (" Calculate the Wang-Landau method for atom type: %d\n", sim->wl.wlmtype); printf (" Average type switch attempts per sweep: %.8lf\n", sim->switchprob); printf (" Number of Sweeps per pairlist update: %d\n", sim->pairlist_update); printf (" Random number seed: %ld\n", seed); printf (" Number of sweeps per writing out cluster info: %ld\n", sim->write_cluster); if (sim->movie > 0) { printf (" Sweeps between movie frames: %ld\n", sim->movie); } else { printf (" No movie\n"); } printf ("\n"); if(sim->pairlist_update){ printf(" A pairlist will be generated every %d steps. 
This is a greedy" " algorithm; make sure you don't have big chains etc.!\n", sim->pairlist_update); } /*--- 3. Validity checks ---*/ if (rotmx < 0.0 || rotmx > 180) { fprintf (stderr, "ERROR: Maximum orientation change must be in range 0 to 180.\n\n"); exit (1); } if (chainrmx < 0.0 || chainrmx > 180) { fprintf (stderr, "ERROR: Maximum orientation change for chains must be in range 0 to 180.\n\n"); exit (1); } if ( (sim->ptype <0) || (sim->ptype>3) ) { fprintf (stderr, "ERROR: Unknown pressure coupling %d. Program only knows: 0 - anisotropic coupling, \ 1 - isotropic coupling, 2 - isotropic in xy z=const, 3 - isotropic xy V=const.\n\n",sim->ptype); exit (1); } if ( (sim->wlm[0] <0) || (sim->wlm[0] > 7) || (sim->wlm[1] <0) || (sim->wlm[1] > 7) ) { fprintf (stderr, "ERROR: Unknown Wang-Landau method %d %d. Program only knows: 0 - none, \ 1 - z-direction od 1st particle, 2 - pore in membrane, 3 - zorientation of 0th particle,\ 4 - distance of fist two particles, 5 - pore around z-axis above CM,\ 6 - pore around z-axis above 0th particle, 7 - number of particles in contact \n\n",sim->wlm[0],sim->wlm[1]); exit (1); } if ( (sim->wlm[0] == 0) && (sim->wlm[1] > 0) ) { fprintf (stderr, "ERROR: Wang-Landau method has to be set for first order parameter and then for second order parameter\n\n"); exit (1); } if ( (sim->wlm[0] == 2) || (sim->wlm[0] == 5) || (sim->wlm[0] == 6) ) { if(sim->wl.wlmtype < 1){ fprintf (stderr, "ERROR: Atom type for the Wang-Landau Method (%d) was false defined.\n\n",sim->wl.wlmtype); exit (1); } if ( (sim->wlm[1] == 2) || (sim->wlm[1] == 5) || (sim->wlm[1] == 6) ) { fprintf (stderr, "ERROR: Simulaneous use of two pore order parameters has not been implemented yet.\n\n"); exit (1); } } /* we store maximum rotation as half angle - useful for quaterions*/ angle = rotmx / 180.0 * PIH *0.5; rotmx = cos((rotmx)/180.0*PIH); chain_angle = chainrmx / 180.0 * PIH; chainrmx = cos((chainrmx)/180.0*PIH); sim->edge.mx *= 2.0; /* The full range is -maxl to +maxl, 
i.e. spanning 2*maxl */ transmx *= 2.0; /* The full range is -maxr to +maxr, i.e. spanning 2*maxr */ chainmmx *= 2.0; /* The full range is -maxr to +maxr, i.e. spanning 2*maxr */ for (i=0;i<MAXT;i++) { sim->trans[i].mx = transmx; sim->rot[i].mx = rotmx; sim->rot[i].angle = angle; } for (i=0;i<MAXMT;i++) { sim->chainm[i].mx = chainmmx; sim->chainr[i].mx = chainrmx; sim->chainr[i].angle = chain_angle; } //parallel tempering #ifdef MPI if ( (sim->temper != sim->paraltemper) && (sim->mpinprocs <2) ) { printf("ERROR: Paralllel tempering at single core does not work.\n\n"); exit(1); } sim->dtemp = (sim->paraltemper - sim->temper )/(sim->mpinprocs-1); sim->temper += sim->dtemp * sim->mpirank; if ( (sim->press != sim->paralpress) && (sim->mpinprocs <2) ) { printf("ERROR: Pressure replica exchange at single core does not work.\n\n"); exit(1); } sim->dpress = (sim->paralpress - sim->press )/(sim->mpinprocs-1); sim->press += sim->dpress * sim->mpirank; seed += sim->mpirank; sim->mpiexch.mx = sim->dtemp; sim->mpiexch.angle = sim->dpress; #endif } /*..............................................................................*/ /* Used by read_options to read a long integer with error checking. NOT USED ANYMORE */ long read_long(FILE *infile, char *error) { char *gotline; char line[500]; int fields; long value; gotline = fgets(line, sizeof(line), infile); fields = sscanf(line, "%ld", &value); if (gotline == NULL || fields != 1) { fprintf (stdout, "\nERROR reading %s from options file.\n\n", error); exit (1); } return value; } /* Used by read_options to read a long integer with error checking. 
NOT USED ANYMORE */ int read_int(FILE *infile, char *error) { char *gotline; char line[500]; int fields; int value; gotline = fgets(line, sizeof(line), infile); fields = sscanf(line, "%d", &value); if (gotline == NULL || fields != 1) { fprintf (stdout, "\nERROR reading %s from options file.\n\n", error); exit (1); } return value; } /*..............................................................................*/ /* Used by read_options to read a double precision with error checking. NOT USED ANYMORE */ double read_double(FILE *infile, char *error) { char *gotline; char line[500]; int fields; double value; gotline = fgets(line, sizeof(line), infile); fields = sscanf(line, "%le", &value); if (gotline == NULL || fields != 1) { fprintf (stdout, "\nERROR reading %s from options file.\n\n", error); exit (1); } return value; } /*..............................................................................*/ /**************************************************************************** * CONFIG INITIALIZATION *****************************************************************************/ /* Reads in the initial configuration from the file "config.init". Each line contains the three components of the position vector and three components of the direction vector and three components of patch direction for a spherocylinder. The direction vector is normalised after being read in. The configuration is checked for particle overlaps. 
*/
/* Load box size and per-particle state from `filename` into *conf, scale
   positions into the unit cube, normalise direction/patch vectors, apply the
   recorded type switches, and make chains whole across periodic boundaries.
   Exits with a diagnostic on any malformed input. */
void init_config(struct topo * topo, struct conf * conf, struct sim * sim, char filename[30])
{
    int err,fields,tmp_type;
    long i,j,current,first;
    FILE * infile;
    char * line, line2[STRLEN];
    size_t line_size = (STRLEN + 1) * sizeof(char);
    line = (char *) malloc(line_size);
    struct particles chorig[MAXCHL];

    /* forward declarations of helpers defined elsewhere in this file */
    int overlap(struct particles, struct particles, struct vector, struct ia_param [MAXT][MAXT]);
    void normalise(struct vector *);
    void ortogonalise(struct vector *, struct vector);
    void usepbc(struct vector *, struct vector);
    double anint(double);
    void strip_comment (char *);
    void trim (char *);
    void aftercommand(char *, char *, char);

    /* longest spherocylinder length among all types — used only for the
       box-size sanity warnings below */
    double maxlength = 0;
    for(i = 0; i < MAXT; i++){
        if(maxlength < topo->ia_params[i][i].len[0])
            maxlength = topo->ia_params[i][i].len[0];
    }

    infile = fopen(filename, "r");
    if (infile == NULL) {
        fprintf (stderr, "\nERROR: Could not open config.init file.\n\n");
        exit (1);
    }

    /* box size: either bare "x y z" on the first line, or on the next line
       after a BOXSEP-prefixed command */
    if(getline(&line, &line_size, infile) == -1){
        fprintf (stderr, "ERROR: Could not read box size.\n\n");
        exit (1);
    }
    strip_comment(line);
    trim(line);
    if (sscanf(line, "%le %le %le", &(conf->box.x), &(conf->box.y), &(conf->box.z)) != 3) {
        if(getline(&line, &line_size, infile) == -1){
            fprintf (stderr, "ERROR: Could not read box size.\n\n");
            exit (1);
        }
        aftercommand(line2,line,BOXSEP);
        strip_comment(line2);
        trim(line2);
        if (sscanf(line2, "%le %le %le", &(conf->box.x), &(conf->box.y), &(conf->box.z)) != 3) {
            fprintf (stderr, "ERROR: Could not read box size.\n\n");
            exit (1);
        }
    }
    if (conf->box.x < maxlength * 2.0 + 2.0) {
        printf ("WARNING: x box length is less than two spherocylinders long.\n\n");
    }
    if (conf->box.y < maxlength * 2.0 + 2.0) {
        printf ("WARNING: y box length is less than two spherocylinders long.\n\n");
    }
    if (conf->box.z < maxlength * 2.0 + 2.0) {
        printf ("WARNING: z box length is less than two spherocylinders long.\n\n");
    }

    DEBUG_INIT("Position of the particle");
    for (i=0; i < topo->npart; i++) {
        /* NOTE(review): a short file silently truncates the particle list
           here instead of reporting an error — confirm this is intended */
        if(getline(&line, &line_size, infile) == -1){
            break;
        }
        strip_comment(line);
        trim(line);
        /* pos(3), dir(3), patchdir(3), switched flag */
        fields = sscanf(line, "%le %le %le %le %le %le %le %le %le %d",
                        &conf->particle[i].pos.x, &conf->particle[i].pos.y, &conf->particle[i].pos.z,
                        &conf->particle[i].dir.x, &conf->particle[i].dir.y, &conf->particle[i].dir.z,
                        &conf->particle[i].patchdir[0].x, &conf->particle[i].patchdir[0].y, &conf->particle[i].patchdir[0].z,
                        &conf->particle[i].switched);
        conf->particle[i].patchdir[1].x = conf->particle[i].patchdir[1].y = conf->particle[i].patchdir[1].z =0;
        conf->particle[i].chdir[0].x = conf->particle[i].chdir[0].y = conf->particle[i].chdir[0].z =0;
        conf->particle[i].chdir[1].x = conf->particle[i].chdir[1].y = conf->particle[i].chdir[1].z =0;
        DEBUG_INIT("Line: %s\nNumber of Fields: %d", line, fields);
        /* old-format line without the switched flag: assume not switched */
        if (fields == 9){
            conf->particle[i].switched = 0;
            fprintf(stdout, "WARNING: Particle %ld is assumed to be not switched!\n", i+1);
            fields++;
        }
        if (fields != 10) {
            fprintf (stderr, "ERROR: Could not read coordinates for particle %ld.\n \
Did you specify box size at the begining?\n\n", i+1);
            free(line);
            exit (1);
        }

        /* Scale position vector to the unit cube */
        usepbc(&conf->particle[i].pos, conf->box );
        conf->particle[i].pos.x /= conf->box.x;
        conf->particle[i].pos.y /= conf->box.y;
        conf->particle[i].pos.z /= conf->box.z;

        /* anisotropic types (geotype < SP) need a non-null direction vector */
        if ((topo->ia_params[conf->particle[i].type][conf->particle[i].type].geotype[0]<SP)&&( DOT(conf->particle[i].dir, conf->particle[i].dir) < ZEROTOL )) {
            //DEBUG_INIT("Geotype = %d < %d", conf->particle[i].geotype,SP);
            fprintf (stderr, "ERROR: Null direction vector supplied for particle %ld.\n\n", i+1);
            free(line);
            exit (1);
        } else {
            normalise(&conf->particle[i].dir);
        }

        /* same requirement for the patch vector; make it orthogonal to dir */
        if ((topo->ia_params[conf->particle[i].type][conf->particle[i].type].geotype[0]<SP)&&( DOT(conf->particle[i].patchdir[0], conf->particle[i].patchdir[0]) < ZEROTOL )) {
            fprintf (stderr, "ERROR: Null patch vector supplied for particle %ld.\n\n", i+1);
            free(line);
            exit (1);
        } else {
            ortogonalise(&conf->particle[i].patchdir[0],conf->particle[i].dir);
            normalise(&conf->particle[i].patchdir[0]);
        }

        // Switch the type
        if(conf->particle[i].switched){
            if(conf->particle[i].switchtype == 0){
                fprintf(stderr, "ERROR: Particle %ld switched even though it has no switchtype", i);
                free(line);
                exit(1);
            }
            tmp_type = conf->particle[i].type;
            conf->particle[i].type = conf->particle[i].switchtype;
            conf->particle[i].switchtype = tmp_type;
        }

        DEBUG_INIT("%ld:\t%lf\t%lf\t%lf", i, conf->particle[i].pos.x, conf->particle[i].pos.y, conf->particle[i].pos.z);
    }
    free(line);

    /*Make chains WHOLE*/
    /* Each chain is shifted so its first particle is the reference, wrapped
       into the primary image (anint on unit-cube coordinates), and shifted
       back — removing periodic-boundary splits inside a chain. */
    for (i=0;i<topo->chainnum;i++){
        j=0;
        current = topo->chainlist[i][0];
        first = current;
        chorig[0].pos = conf->particle[first].pos;
        while (current >=0 ) {
            /*shift the chain particle by first one*/
            conf->particle[current].pos.x -= chorig[0].pos.x;
            conf->particle[current].pos.y -= chorig[0].pos.y;
            conf->particle[current].pos.z -= chorig[0].pos.z;
            /*put it in orig box*/
            conf->particle[current].pos.x -= anint(conf->particle[current].pos.x);
            conf->particle[current].pos.y -= anint(conf->particle[current].pos.y);
            conf->particle[current].pos.z -= anint(conf->particle[current].pos.z);
            //printf("ant: %f %f %f\n",conf->particle[current].pos.x,conf->particle[current].pos.y,conf->particle[current].pos.z);
            /*shot it back*/
            conf->particle[current].pos.x += chorig[0].pos.x;
            conf->particle[current].pos.y += chorig[0].pos.y;
            conf->particle[current].pos.z += chorig[0].pos.z;
            //printf("posstart: %f %f %f\n",conf->particle[current].pos.x,conf->particle[current].pos.y,conf->particle[current].pos.z);
            j++;
            current = topo->chainlist[i][j];
        }
    }

    /* NOTE(review): the initial-overlap check below is commented out, so err
       is always 0 and overlapping start configurations are accepted silently */
    err = 0;
    //for (i=0; i < topo->npart-1; i++) {
    //    for (j=i+1; j < topo->npart; j++) {
    //        if ( overlap(conf->particle[i], conf->particle[j], conf->box, topo->ia_params) ) {
    //            fprintf (stderr,
    //                    "ERROR: Overlap in initial coniguration between particles %ld and %ld.\n",
    //                    i+1, j+1);
    //            err = 1;
    //        }
    //    }
    //}
    if (err) {
        printf ("\n");
        exit (1);
    }

    fclose (infile);
    fflush (stdout);
}
/*..............................................................................*/ /**************************************************************************** * TOPOLOGY INITIALIZATION *****************************************************************************/ /* Create lists for chain operations: Connectivity list where it is written for each sc with which sc it is connected. The order is important because spherocylinders have direction First is interacting tail then head. Chain list where particles are assigned to chains to which they belong */ void init_top(struct topo * topo, struct conf * conf, struct sim * sim,char filename[30]) { long i,j,k,mol,maxch,maxpart; FILE *infile; char *pline=NULL, *dummy=NULL, *sysnames[MAXN]; char line[STRLEN], keystr[STRLEN], molname[STRLEN]; unsigned size; long *sysmoln /*[MAXN]*/; BOOL exclusions[MAXT][MAXT]; char *fgets2(char *, int , FILE *); void strip_comment (char *); void trim(char *); int continuing(char *); void upstring (char *); int filltypes(char **, struct topo * topo); int fillexter(char **, struct topo * topo); int fillexclusions(char **, BOOL (*exclusions)[MAXT][MAXT]); void beforecommand(char *, char *, char); int fillmol(char *, char *, struct molecule * molecules, struct topo * topo); int fillsystem(char *, char *[MAXN], long **); void initparams(struct topo * topo); void genparampairs(struct topo * topo, BOOL (*exclusions)[MAXT][MAXT]); int topdealoc(char **, char *[MAXN], long **, struct molecule *); struct molecule molecules[MAXMT]; if ((infile = fopen(filename, "r")) == NULL) { fprintf (stderr, "\nTOPOLOGY ERROR: Could not open top.init file.\n\n"); exit (1); } fprintf (stdout, "Initialize chainlist...\n"); fflush(stdout); sysmoln = malloc( sizeof(long)*MAXN); if(sysmoln == NULL){ fprintf(stderr, "\nTOPOLOGY ERROR: Could not allocate memory for sysmoln"); exit(1); } struct particles tmp_particles[MAXN]; for (i=0;i<MAXN;i++) { if (i < MAXMT) { topo->chainparam[i].bond1eq = -1; 
topo->chainparam[i].bond1c = -1; topo->chainparam[i].bond2eq = -1; topo->chainparam[i].bond2c = -1; topo->chainparam[i].bonddc = -1; topo->chainparam[i].angle1eq = -1; topo->chainparam[i].angle1c = -1; topo->chainparam[i].angle2eq = -1; topo->chainparam[i].angle2c = -1; molecules[i].name = NULL; molecules[i].type = malloc(sizeof(long)*MAXN); molecules[i].switchtype = malloc(sizeof(long)*MAXN); molecules[i].delta_mu = malloc(sizeof(double)*MAXN); for (j=0;j<MAXN;j++) { molecules[i].type[j] = -1; } } for (j = 0; j < MAXCHL; j++){ topo->chainlist[i][j] = -1; } sysnames[i]=NULL; } for (i=0;i<MAXT;i++) { for (j=0;j<MAXT;j++) { exclusions[i][j]=FALSE; } } topo->exter.exist = FALSE; topo->exter.thickness = 0.0; topo->exter.epsilon = 0.0; topo->exter.attraction = 0.0; topo->exter.sqmaxcut = 0.0; for(i = 0; i < MAXT; i++){ for(j = 0; j < MAXT; j++){ for(k = 0; k < 2; k++){ topo->ia_params[i][j].geotype[k] = 0; } } } fprintf (stdout, "Reading topology...\n"); fflush(stdout); molname[0] = ' '; initparams(topo); pline=malloc((size_t)STRLEN); while (fgets2(line,STRLEN-2,infile) != NULL) { strcpy(pline,line); if (!pline) fprintf (stderr, "\nTOPOLOGY ERROR: Empty line in topology.\n\n"); /* build one long line from several fragments */ while (continuing(line) && (fgets2(line,STRLEN-1,infile) != NULL)) { size=strlen(pline)+strlen(line)+1; free(pline); pline=malloc((size_t)size); strcat(pline,line); } /* skip trailing and leading spaces and comment text */ strip_comment (pline); trim (pline); /* if there is something left... 
*/ if ((int)strlen(pline) > 0) { // get the [COMMAND] key if (pline[0] == OPENKEY) { pline[0] = ' '; beforecommand(keystr,pline,CLOSEKEY); upstring (keystr); } else { //DEBUG fprintf (stdout, "Topology read type:%s, %s \n",keystr,pline); if (!strcmp(keystr,"TYPES")) { fflush(stdout); if (!filltypes(&pline, topo)) { DEBUG_INIT("Something went wrong with filltypes"); fprintf (stderr, "\nTOPOLOGY ERROR: in reading types\n\n"); topdealoc(&pline,sysnames,&sysmoln, molecules); exit (1); } DEBUG_INIT("back in init_top"); } else{ if (!strcmp(keystr,"MOLECULES")){ DEBUG_INIT("Let's go to the molecules"); if (molname[0] == ' ') { beforecommand(molname,pline,SEPARATOR); i=0; while (molecules[i].name != NULL) i++; DEBUG_INIT("in the middle of getting to fillmol"); molecules[i].name = malloc(strlen(molname)+1); strcpy(molecules[i].name, molname); fprintf (stdout, "Topology read for molecule: %s \n",molname); } if (!fillmol(molname, pline, molecules, topo)) { fprintf (stderr, "\nTOPOLOGY ERROR: in reading molecules\n\n"); topdealoc(&pline,sysnames,&sysmoln, molecules); exit (1); } if ((dummy = strchr (pline,CLOSEMOL)) != NULL) molname[0] = ' '; } else { if (!strcmp(keystr,"SYSTEM")) { if (!fillsystem(pline,sysnames,&sysmoln)) { fprintf (stderr, "\nTOPOLOGY ERROR: in reading system\n\n"); topdealoc(&pline,sysnames,&sysmoln, molecules); exit (1); } } else { if (!strcmp(keystr,"EXTER")) { fflush(stdout); if (!fillexter(&pline, topo)) { DEBUG_INIT("Something went wrong with external potential"); fprintf (stderr, "\nTOPOLOGY ERROR: in reading external potential\n\n"); topdealoc(&pline,sysnames,&sysmoln, molecules); exit (1); } } else { if (!strcmp(keystr,"EXCLUDE")) { fflush(stdout); if (!fillexclusions(&pline,&exclusions)) { DEBUG_INIT("Something went wrong with exclusions potential"); fprintf (stderr, "\nTOPOLOGY ERROR: in reading exclusions\n\n"); topdealoc(&pline,sysnames,&sysmoln, molecules); exit (1); } } else { fprintf (stderr, "\nTOPOLOGY ERROR: invalid keyword:%s.\n\n", 
keystr); topdealoc(&pline,sysnames,&sysmoln, molecules); exit (1); } } } } } } } }
/*we have sucessfully read topology*/
/* Release the read buffer and close the topology file. */
if (pline !=NULL) free(pline);
pline=NULL;
fclose (infile);
fflush (stdout);

/*fill ia_params combinations*/
fprintf (stdout, "\nTopology succesfully read. Generating pair interactions...\n");
genparampairs(topo,&exclusions);

/* Enlarge the global cutoffs by the longest spherocylinder length plus a
   safety margin; sqmaxcut is stored squared for cheap distance tests. */
double maxlength = 0;
for(i = 0; i < MAXT; i++){
    if(maxlength < topo->ia_params[i][i].len[0])
        maxlength = topo->ia_params[i][i].len[0];
}
topo->sqmaxcut += maxlength+2;
topo->sqmaxcut *= 1.1;
topo->maxcut = topo->sqmaxcut;
topo->sqmaxcut = topo->sqmaxcut*topo->sqmaxcut;
topo->exter.sqmaxcut += maxlength;
topo->exter.sqmaxcut *= topo->exter.sqmaxcut*1.1;

/*TODO fill chain list and maxch, park particle type*/
/* Walk the [SYSTEM] section: for every system entry instantiate sysmoln[i]
   copies of the matching molecule definition into tmp_particles. */
fprintf (stdout, "Generating chainlist...\n");
maxch=0;
maxpart=0;
i=0;
while (sysnames[i]!=NULL) {
    /* locate the molecule definition whose name matches this system entry */
    mol=0;
    while (strcmp(molecules[mol].name,sysnames[i])) {
        mol++;
        if (molecules[mol].name == NULL) {
            fprintf (stderr, "TOPOLOGY ERROR: molecules %s is not defined.\n\n",sysnames[i]);
            topdealoc(&pline,sysnames,&sysmoln, molecules);
            exit(1);
        }
    }
    for (j=0;j<sysmoln[i];j++) {
        //DEBUG fprintf (stdout, "molnames %s sysname %s sysnum %ld \n",molnames[mol],sysnames[i],sysmoln[i]);
        k=0;
        /* copy every particle of the molecule (type list is -1 terminated) */
        while (molecules[mol].type[k] != -1) {
            tmp_particles[maxpart].type = molecules[mol].type[k];
            tmp_particles[maxpart].switchtype = molecules[mol].switchtype[k];
            tmp_particles[maxpart].delta_mu = molecules[mol].delta_mu[k];
            tmp_particles[maxpart].chaint = mol;
            tmp_particles[maxpart].chainn = maxch;
            if (k > MAXCHL) {
                fprintf (stderr, "TOPOLOGY ERROR: more particles in chan (%ld) than allowed(%d).\n",k,MAXCHL);
                fprintf (stderr, "Change MAXCHL in source and recompile the program. \n\n");
                topdealoc(&pline,sysnames,&sysmoln, molecules);
                exit(1);
            }
            /* molecules with more than one particle are chains:
               record chain membership */
            if (molecules[mol].type[1] != -1) {
                topo->chainlist[maxch][k] = maxpart;
            }
            k++;
            maxpart++;
            if (maxpart > MAXN) {
                fprintf (stderr, "TOPOLOGY ERROR: more particles(%ld) than allowed(%d).\n",maxpart,MAXN);
                fprintf (stderr, "Change MAXN in source and recompile the program. \n\n");
                topdealoc(&pline,sysnames,&sysmoln, molecules);
                exit(1);
            }
        }
        if (molecules[mol].type[1] != -1) {
            maxch++;
        }
    }
    i++;
}
topo->npart = maxpart;

/* write the particles from the temporary to the "permanent" conf */
conf->particle = malloc(sizeof(struct particles) * topo->npart);
if(conf->particle == NULL){
    fprintf(stderr, "\nTOPOLOGY ERROR: Could not allocate memory for conf->particle");
    exit(1);
}
for(i = 0; i < topo->npart; i++){
    conf->particle[i].type = tmp_particles[i].type;
    conf->particle[i].switchtype = tmp_particles[i].switchtype;
    conf->particle[i].delta_mu = tmp_particles[i].delta_mu;
    conf->particle[i].chaint = tmp_particles[i].chaint;
    conf->particle[i].chainn = tmp_particles[i].chainn;
}

/* Initialize the clusterlist */
sim->clusterlist = malloc(sizeof(long) * topo->npart);
if(sim->clusterlist == NULL){
    fprintf(stderr, "\nTOPOLOGY ERROR: Could not allocate memory for sim->clusterlist!");
    exit(1);
}
sim->clustersenergy = malloc(sizeof(double) * topo->npart);
if(sim->clustersenergy== NULL){
    fprintf(stderr, "\nTOPOLOGY ERROR: Could not allocate memory for sim->clustersenergy!");
    exit(1);
}
sim->clusters = NULL;

/* get all the particles with switch type */
/* note: VLA sized by the particle count read above */
long switchlist[topo->npart];
long n_switch_part = 0;
for(i = 0; i < topo->npart; i++){
    if(conf->particle[i].type != conf->particle[i].switchtype){
        switchlist[n_switch_part] = i;
        n_switch_part++;
    }
}
topo->n_switch_part = n_switch_part;
if (n_switch_part == 0 && sim->switchprob > 0){
    fprintf(stderr, "TOPOLOGY WARNING: No switchable particles found, but probability for a switch is not zero!\n");
    sim->switchprob = 0;
    fprintf(stderr, "TOPOLOGY WARNING: We changed Switch Probability to zero in this run!\n");
}
topo->switchlist=NULL;
if (n_switch_part > 0){
    topo->switchlist = malloc(sizeof(long) * n_switch_part);
    for(i = 0; i < n_switch_part; i++){
        topo->switchlist[i] = switchlist[i];
        //DEBUG
        //printf("%ld is in switchlist\n", switchlist[i]);
    }
}

/* Count chains actually stored in chainlist and cross-check against maxch. */
j = 0;
while (topo->chainlist[j][0] >= 0) {
    j++;
}
topo->chainnum = j;
if (topo->chainnum != maxch) {
    fprintf (stderr, "TOPOLOGY ERROR: Maximum number of chains(%ld) does not agree with number of chains (%ld)\n\n",maxch,topo->chainnum);
    topdealoc(&pline,sysnames,&sysmoln, molecules);
    exit (1);
}
k=0;

/*clear connectivity and then fill it from chain list*/
/* conlist[p][0]=previous, [1]=next, [2]=second previous, [3]=second next
   neighbour within the chain; -1 means no neighbour. */
fprintf (stdout, "Generating connectivity...\n");
for (i=0; i<MAXN; i++) {
    topo->conlist[i][0] = -1;
    topo->conlist[i][1] = -1;
    topo->conlist[i][2] = -1;
    topo->conlist[i][3] = -1;
}
conf->sysvolume = 0;
for (i=0; i<maxpart; i++) {
    for (j=0; j<MAXCHL; j++) {
        if (topo->chainlist[i][j] >= 0) {
            k = topo->chainlist[i][j];
            if ((j+1 < MAXCHL)&&(topo->chainlist[i][j+1] >= 0))
                topo->conlist[k][1] = topo->chainlist[i][j+1]; /*if there is a next particle fill it to head bond*/
            if (j > 0)
                topo->conlist[k][0] = topo->chainlist[i][j-1]; /*if this is not first particle fill tail bond*/
            if ((j+2 < MAXCHL)&& (topo->chainlist[i][j+2] >= 0))
                topo->conlist[k][3] = topo->chainlist[i][j+2]; /*if there is a second next particle fill it second neighbour*/
            if (j > 1)
                topo->conlist[k][2] = topo->chainlist[i][j-2]; /*if this is not second or first particle fill second tail bond*/
        }
    }
    /* accumulate total particle volume of the system */
    conf->sysvolume += topo->ia_params[conf->particle[i].type][conf->particle[i].type].volume;
}
/*DEBUG
for (i=0; i<MAXN; i++) {
    for (j=0; j<MAXCHL; j++) {
        fprintf (stderr, " %d",chainlist[i][j]);
    }
    fprintf (stderr, " \n");
}
for (i=0; i<MAXN; i++) {
    printf (" %ld %ld %ld %ld\n",conlist[i][0],conlist[i][1],conlist[i][2],conlist[i][3]);
}
*/
// Mark particles as not switched
for(i = 0; i < maxpart; i++){
    conf->particle[i].switched = 0;
}
topdealoc(&pline,sysnames,&sysmoln, molecules);
DEBUG_INIT("Finished with reading the topology");

/* Parallel tempering check */
#ifdef MPI
// probability to switch replicas = exp ( -0.5 * dT*dT * N / (1 + dT) )
printf("Probability to switch replicas is roughly: %f\n",exp(-0.5 * maxpart * sim->dtemp * sim->dtemp / (1.0 + sim->dtemp)) );
#endif
}

/*..........................................................................*/

/*dealocting memory for init_top*/
/* Frees the read buffer, the per-system molecule counts, the molecule
   definitions (only the first MAXMT slots are ever allocated) and the
   system name strings. Always returns 0. */
int topdealoc(char **pline,char *sysnames[MAXN], long **sysmoln, struct molecule * molecules)
{
    long i;

    if ((*pline) != NULL) free((*pline));
    (*pline)=NULL;
    if ((*sysmoln) != NULL) free((*sysmoln));
    (*sysmoln)=NULL;
    for (i=0;i<MAXN;i++) {
        /* molecule records exist only for the first MAXMT entries */
        if (i < MAXMT) {
            free(molecules[i].name);
            free(molecules[i].type);
            free(molecules[i].switchtype);
            free(molecules[i].delta_mu);
        }
        if ((sysnames[i]) != NULL) free(sysnames[i]);
        sysnames[i]=NULL;
    }
    return 0;
}

/* initiate vectors of a single particle*/
/* Pre-computes the cached orientation vectors (patch sides, second patch
   direction, chiral axes) for particle `target` from its geotype parameters.
   Isotropic geotypes (SCA, SCN) need no cached vectors. */
void int_partvec(long target, struct ia_param * ia_parami, struct conf * conf )
{
    struct quat quatrot;
    /* forward declarations of the quaternion/vector helpers used below */
    struct quat quat_create(struct vector, double, double);
    void vec_rotate(struct vector *, struct quat);
    void normalise(struct vector *);
    void ortogonalise(struct vector *,struct vector);

    if ( (ia_parami->geotype[0] == SCA) || (ia_parami->geotype[0] == SCN) ){
        /*SCA and SCN are isotropic... nothing to initialize*/
        return;
    }
    /* make dir a unit vector and patchdir[0] perpendicular to it */
    normalise (&conf->particle[target].dir);
    ortogonalise(&conf->particle[target].patchdir[0],conf->particle[target].dir);
    /*calculate patch sides*/
    if ( (ia_parami->geotype[0] == PSC) || (ia_parami->geotype[0] == CPSC) || (ia_parami->geotype[0] == TPSC) || (ia_parami->geotype[0] == TCPSC) ){
        /* rotate patch vector by half size of patch*/
        conf->particle[target].patchsides[0] = conf->particle[target].patchdir[0];
        quatrot=quat_create(conf->particle[target].dir, ia_parami->pcoshalfi[0], ia_parami->psinhalfi[0]);
        vec_rotate(&(conf->particle[target].patchsides[0]),quatrot);
        /*second side*/
        conf->particle[target].patchsides[1] = conf->particle[target].patchdir[0];
        quatrot=quat_create(conf->particle[target].dir, ia_parami->pcoshalfi[0], -1.0*ia_parami->psinhalfi[0]);
        vec_rotate(&(conf->particle[target].patchsides[1]),quatrot);
    }
    /*calculate second patchdir*/
    if ( (ia_parami->geotype[0] == TPSC) || (ia_parami->geotype[0] == TCPSC) || (ia_parami->geotype[0] == TCHPSC) || (ia_parami->geotype[0] == TCHCPSC)){
        conf->particle[target].patchdir[1] = conf->particle[target].patchdir[0];
        quatrot=quat_create(conf->particle[target].dir, ia_parami->csecpatchrot[0], ia_parami->ssecpatchrot[0]);
        vec_rotate(&(conf->particle[target].patchdir[1]),quatrot);
        ortogonalise(&conf->particle[target].patchdir[1],conf->particle[target].dir);
    }
    /*calculate second patch sides*/
    if ( (ia_parami->geotype[0] == TPSC) || (ia_parami->geotype[0] == TCPSC) ){
        /* rotate patch vector by half size of patch*/
        conf->particle[target].patchsides[2] = conf->particle[target].patchdir[1];
        quatrot=quat_create(conf->particle[target].dir, ia_parami->pcoshalfi[2], ia_parami->psinhalfi[2]);
        vec_rotate(&(conf->particle[target].patchsides[2]),quatrot);
        /*second side*/
        conf->particle[target].patchsides[3] = conf->particle[target].patchdir[1];
        quatrot=quat_create(conf->particle[target].dir, ia_parami->pcoshalfi[2], -1.0*ia_parami->psinhalfi[2]);
        vec_rotate(&(conf->particle[target].patchsides[3]),quatrot);
    }
    /*calculate chdir vector*/
    if ( (ia_parami->geotype[0] == CHPSC) || (ia_parami->geotype[0] == CHCPSC) || (ia_parami->geotype[0] == TCHPSC) || (ia_parami->geotype[0] == TCHCPSC)){
        conf->particle[target].chdir[0] = conf->particle[target].dir;
        quatrot = quat_create(conf->particle[target].patchdir[0], ia_parami->chiral_cos[0], ia_parami->chiral_sin[0]);
        vec_rotate(&(conf->particle[target].chdir[0]), quatrot);
        /* rotate patch vector by half size of patch*/
        /* for chiral types the sides are rotated about the chiral axis */
        conf->particle[target].patchsides[0] = conf->particle[target].patchdir[0];
        quatrot=quat_create(conf->particle[target].chdir[0], ia_parami->pcoshalfi[0], ia_parami->psinhalfi[0]);
        vec_rotate(&(conf->particle[target].patchsides[0]),quatrot);
        /*second side*/
        conf->particle[target].patchsides[1] = conf->particle[target].patchdir[0];
        quatrot=quat_create(conf->particle[target].chdir[0], ia_parami->pcoshalfi[0], -1.0*ia_parami->psinhalfi[0]);
        vec_rotate(&(conf->particle[target].patchsides[1]),quatrot);
    }
    /*calculate chdir vector for seond patch*/
    if ( (ia_parami->geotype[0] == TCHPSC) || (ia_parami->geotype[0] == TCHCPSC) ){
        conf->particle[target].chdir[1] = conf->particle[target].dir;
        quatrot = quat_create(conf->particle[target].patchdir[1], ia_parami->chiral_cos[0], ia_parami->chiral_sin[0]);
        vec_rotate(&(conf->particle[target].chdir[1]), quatrot);
        /* rotate patch vector by half size of patch to get sides*/
        conf->particle[target].patchsides[2] = conf->particle[target].patchdir[1];
        quatrot=quat_create(conf->particle[target].chdir[1], ia_parami->pcoshalfi[2], ia_parami->psinhalfi[2]);
        vec_rotate(&(conf->particle[target].patchsides[2]),quatrot);
        /*second side*/
        conf->particle[target].patchsides[3] = conf->particle[target].patchdir[1];
        quatrot=quat_create(conf->particle[target].chdir[1], ia_parami->pcoshalfi[2], -1.0*ia_parami->psinhalfi[2]);
        vec_rotate(&(conf->particle[target].patchsides[3]),quatrot);
    }
}

/* calculate vectors on particles for speedup*/
void
partvecinit(struct topo * topo, struct sim * sim, struct conf * conf )
{
    long i;
    void int_partvec(long target, struct ia_param *, struct conf * conf );

    for(i = 0; i < topo->npart; i++){
        /* only anisotropic geotypes (numerically below SP) cache vectors */
        if ( topo->ia_params[conf->particle[i].type][conf->particle[i].type].geotype[0] < SP)
            int_partvec(i,&(topo->ia_params[conf->particle[i].type][conf->particle[i].type]),conf);
    }
}

/*generate interations pairs*/
/* Builds the mixed ia_params[i][j] (i != j) from the per-type diagonal
   entries using Lorentz-Berthelot-style mixing (arithmetic mean for sizes,
   geometric mean for epsilon), then fills the external-potential interaction
   table and zeroes epsilon for excluded pairs. */
void genparampairs(struct topo * topo, BOOL (*exclusions)[MAXT][MAXT])
{
    int i,j,k;
    int a[2];
    int len;
    double length = 0; // The length of a PSC, currently only one is allow, ie implemented

    for (i=0;i<MAXT;i++) {
        for (j=0;j<MAXT;j++) {
            if (i!=j) {
                /* combine only types that were defined (geotype 0 = unused) */
                if((topo->ia_params[j][j].geotype[0] != 0) && (topo->ia_params[i][i].geotype[0] != 0)){
                    a[0] = i;
                    a[1] = j;
                    for(k = 0; k < 2; k++){
                        topo->ia_params[i][j].geotype[k] = topo->ia_params[a[k]][a[k]].geotype[0];
                        topo->ia_params[i][j].len[k] = topo->ia_params[a[k]][a[k]].len[0];
                        /* all spherocylinders must share one length */
                        if (topo->ia_params[a[k]][a[k]].len[0] > 0){
                            if (length == 0){
                                length = topo->ia_params[a[k]][a[k]].len[0];
                            }
                            else if (length > 0){
                                if (length != topo->ia_params[a[k]][a[k]].len[0]){
                                    fprintf(stderr, "Error: ");
                                    fprintf(stderr, "Different lengths for spherocylinders have not been implemented yet!\n");
                                    fprintf(stderr, "\tCheck the length of type %d!\n", a[k]);
                                    exit(1);
                                }
                            }
                        }
                        topo->ia_params[i][j].half_len[k] = topo->ia_params[a[k]][a[k]].half_len[0];
                        /* Handle angles only, when geotype is a patchs sphero cylinder */
                        if(topo->ia_params[i][j].geotype[k] >= PSC && topo->ia_params[i][j].geotype[k] < SP){
                            topo->ia_params[i][j].pangl[k] = topo->ia_params[a[k]][a[k]].pangl[0];
                            topo->ia_params[i][j].panglsw[k] = topo->ia_params[a[k]][a[k]].panglsw[0];
                            topo->ia_params[i][j].pcangl[k] = cos(topo->ia_params[i][j].pangl[k]/2.0/180*PI);
                            topo->ia_params[i][j].pcanglsw[k] = cos((topo->ia_params[i][j].pangl[k]/2.0+topo->ia_params[i][j].panglsw[k])/180*PI);
                            topo->ia_params[i][j].pcoshalfi[k] = cos((topo->ia_params[i][j].pangl[k]/2.0+topo->ia_params[i][j].panglsw[k])/2.0/180*PI);
                            topo->ia_params[i][j].psinhalfi[k] = sqrt(1.0 - topo->ia_params[i][j].pcoshalfi[k] * topo->ia_params[i][j].pcoshalfi[k]);
                        }
                        /* Only when the PSC is chiral */
                        if( (topo->ia_params[i][j].geotype[k] == CHCPSC) || (topo->ia_params[i][j].geotype[k] == CHPSC) \
                          || (topo->ia_params[i][j].geotype[k] == TCHCPSC) || (topo->ia_params[i][j].geotype[k] == TCHPSC) ){
                            topo->ia_params[i][j].chiral_cos[k] = topo->ia_params[a[k]][a[k]].chiral_cos[0];
                            topo->ia_params[i][j].chiral_sin[k] = topo->ia_params[a[k]][a[k]].chiral_sin[0];
                        }
                        /* Information of two patches */
                        if( (topo->ia_params[i][j].geotype[k] == TCPSC) || (topo->ia_params[i][j].geotype[k] == TPSC) \
                          || (topo->ia_params[i][j].geotype[k] == TCHCPSC) || (topo->ia_params[i][j].geotype[k] == TCHPSC) ){
                            topo->ia_params[i][j].csecpatchrot[k] = topo->ia_params[a[k]][a[k]].csecpatchrot[0];
                            topo->ia_params[i][j].ssecpatchrot[k] = topo->ia_params[a[k]][a[k]].ssecpatchrot[0];
                            topo->ia_params[i][j].pangl[k+2] = topo->ia_params[a[k]][a[k]].pangl[2];
                            topo->ia_params[i][j].panglsw[k+2] = topo->ia_params[a[k]][a[k]].panglsw[2];
                            topo->ia_params[i][j].pcangl[k+2] = cos(topo->ia_params[i][j].pangl[k+2]/2.0/180*PI);
                            topo->ia_params[i][j].pcanglsw[k+2] = cos((topo->ia_params[i][j].pangl[k+2]/2.0+topo->ia_params[i][j].panglsw[k+2])/180*PI);
                            topo->ia_params[i][j].pcoshalfi[k+2] = cos((topo->ia_params[i][j].pangl[k+2]/2.0+topo->ia_params[i][j].panglsw[k+2])/2.0/180*PI);
                            topo->ia_params[i][j].psinhalfi[k+2] = sqrt(1.0 - topo->ia_params[i][j].pcoshalfi[k+2] * topo->ia_params[i][j].pcoshalfi[k+2]);
                        }
                    }
                    /* NOTE(review): other_name is copied from [i][i].other_name,
                       not from [j][j] — verify this asymmetry is intended. */
                    len = strlen(topo->ia_params[i][i].name);
                    strncpy(topo->ia_params[i][j].name, topo->ia_params[i][i].name, len + 1);
                    len = strlen(topo->ia_params[i][i].other_name);
                    strncpy(topo->ia_params[i][j].other_name, topo->ia_params[i][i].other_name, len + 1);
                    topo->ia_params[i][j].sigma = AVER(topo->ia_params[i][i].sigma,topo->ia_params[j][j].sigma);
                    topo->ia_params[i][j].epsilon = sqrt(topo->ia_params[i][i].epsilon * topo->ia_params[j][j].epsilon);
                    topo->ia_params[i][j].pswitch = AVER(topo->ia_params[i][i].pswitch,topo->ia_params[j][j].pswitch);
                    topo->ia_params[i][j].rcutwca = (topo->ia_params[i][j].sigma)*pow(2.0,1.0/6.0);
                    // Averaging of the flat part of attraction
                    topo->ia_params[i][j].pdis = AVER(topo->ia_params[i][i].pdis - topo->ia_params[i][i].rcutwca, \
                        topo->ia_params[j][j].pdis - topo->ia_params[j][j].rcutwca) + topo->ia_params[i][j].rcutwca;
                    topo->ia_params[i][j].rcut = topo->ia_params[i][j].pswitch+topo->ia_params[i][j].pdis;
                    // if not non-attractive == if attractive
                    if (!((topo->ia_params[i][j].geotype[0] % 10 == 0) || (topo->ia_params[i][j].geotype[1] % 10 == 0))){
                        if (topo->ia_params[i][j].rcutwca > topo->ia_params[i][j].rcut){
                            fprintf(stderr, "Error: Repulsive cutoff is larger than the attractive cutoff!\n");
                            fprintf(stderr, " between %d and %d: %lf > %lf\n", i, j, topo->ia_params[i][j].rcutwca, topo->ia_params[i][j].rcut);
                        }
                    }
                    /* track the largest cutoff seen so far */
                    if ( topo->ia_params[i][j].rcutwca > topo->sqmaxcut )
                        topo->sqmaxcut = topo->ia_params[i][j].rcutwca;
                    if ( topo->ia_params[i][j].rcut > topo->sqmaxcut )
                        topo->sqmaxcut = topo->ia_params[i][j].rcut;
                }
            }
        }
        /*filling interaction with external potential*/
        if( (topo->exter.exist) && (topo->ia_params[i][i].geotype[0] != 0)){
            /*use everything like for given particles except distance and attraction, which is generated as for other interactions*/
            topo->exter.interactions[i] = topo->ia_params[i][i];
            topo->exter.interactions[i].sigma = AVER(topo->ia_params[i][i].sigma, topo->exter.thickness);
            topo->exter.interactions[i].rcutwca = (topo->exter.interactions[i].sigma)*pow(2.0,1.0/6.0);
            topo->exter.interactions[i].epsilon = sqrt(topo->ia_params[i][i].epsilon * topo->exter.epsilon);
            topo->exter.interactions[i].pswitch = AVER(topo->ia_params[i][i].pswitch, topo->exter.attraction);
            topo->exter.interactions[i].pdis = AVER(topo->ia_params[i][i].pdis - topo->ia_params[i][i].rcutwca, 0.0) + topo->exter.interactions[i].rcutwca;
            topo->exter.interactions[i].rcut = topo->exter.interactions[i].pswitch + topo->exter.interactions[i].pdis;
            if (topo->exter.interactions[i].rcut > topo->exter.sqmaxcut )
                topo->exter.sqmaxcut = topo->exter.interactions[i].rcut;
        }
    }
    /* excluded pairs interact purely repulsively: kill the attraction */
    for (i=0;i<MAXT;i++) {
        for (j=0;j<MAXT;j++) {
            if ( (*exclusions)[i][j] )
                topo->ia_params[i][j].epsilon = 0.0;
        }
    }
}

/*initialize parameters for interactions*/
/* Zeroes the whole ia_params table before the topology is read.
   NOTE(review): pangl/panglsw/pcangl/pcanglsw/pcoshalfi/psinhalfi are only
   zeroed for indices 2..3 here — presumably 0..1 are always set later by
   filltypes/genparampairs; verify. */
void initparams(struct topo * topo)
{
    int i,j,k;

    for (i=0;i<MAXT;i++) {
        for (j=0;j<MAXT;j++) {
            for(k = 0; k < 2; k++){
                topo->ia_params[i][j].geotype[k] = 0;
                topo->ia_params[i][j].len[k] = 0.0;
                topo->ia_params[i][j].half_len[k] = 0.0;
                topo->ia_params[i][j].chiral_cos[k] = 0.0;
                topo->ia_params[i][j].chiral_sin[k] = 0.0;
                topo->ia_params[i][j].csecpatchrot[k] = 0.0;
                topo->ia_params[i][j].ssecpatchrot[k] = 0.0;
            }
            for(k = 2; k < 4; k++){
                topo->ia_params[i][j].pangl[k] = 0.0;
                topo->ia_params[i][j].panglsw[k] = 0.0;
                topo->ia_params[i][j].pcangl[k] = 0.0;
                topo->ia_params[i][j].pcanglsw[k] = 0.0;
                topo->ia_params[i][j].pcoshalfi[k] = 0.0;
                topo->ia_params[i][j].psinhalfi[k] = 0.0;
            }
            topo->ia_params[i][j].sigma = 0.0;
            topo->ia_params[i][j].epsilon = 0.0;
            topo->ia_params[i][j].rcutwca = 0.0;
            topo->ia_params[i][j].pdis = 0.0;
            topo->ia_params[i][j].pswitch = 0.0;
            topo->ia_params[i][j].rcut = 0.0;
            topo->ia_params[i][j].volume = 0.0;
            topo->ia_params[i][j].pvolscale = 0.0;
        }
    }
    topo->sqmaxcut = 0;
}

/*...........................................................................*/

/*filling the system parameters*/
/* Parses one [SYSTEM] line of the form "<name> <count>" into the next free
   slot of sysnames/sysmoln. Returns 1 on success, 0 on parse error. */
int fillsystem(char *pline, char *sysnames[MAXN], long **sysmoln)
{
    int i,fields;
    char zz[STRLEN];
    void trim (char *);

    trim(pline);
    /* NOTE(review): this null check happens after trim(pline) already
       dereferenced pline — confirm callers never pass NULL. */
    if (!pline) {
        fprintf (stderr, "TOPOLOGY ERROR: obtained empty line in fil system.\n\n");
        return 0;
    }
    /* append at the first free slot */
    i=0;
    while (sysnames[i]!=NULL) i++;

    fields = sscanf(pline, "%s %ld", zz, &(*sysmoln)[i]);
    sysnames[i]=malloc(strlen(zz)+1);
    strcpy(sysnames[i],zz);
    if (fields != 2) {
        fprintf (stderr, "TOPOLOGY ERROR: failed reading system from (%s).\n\n", pline);
        return 0;
    }
    if ((*sysmoln)[i] < 1) {
        fprintf (stderr,
"TOPOLOGY ERROR: cannot have %ld number of molecules.\n\n", (*sysmoln)[i]);
        return 0;
    }
    fprintf (stdout, "system: %s %ld\n",sysnames[i],(*sysmoln)[i]);
    return 1;
}

/*filling the parameters for molecules*/
/* Parses one directive inside a [MOLECULES] block for molecule `molname`:
   PARTICLES, BOND1, BOND2, BONDD, ANGLE1 or ANGLE2. Returns 1 on success,
   0 on parse error. */
int fillmol(char *molname, char *pline, struct molecule * molecules, struct topo * topo)
{
    DEBUG_INIT("fillmol just has been called!");
    char str[STRLEN],str2[STRLEN],molcommand[STRLEN],molparams[STRLEN];
    int i,j,fields;
    double bondk,bonddist;
    void trim (char *);
    void upstring(char *);
    void beforecommand(char *, char *, char);
    void aftercommand(char *, char *, char);

    /* strip the surrounding molecule braces and split "command: params" */
    beforecommand(str2, pline, CLOSEMOL);
    aftercommand(str, str2, OPENMOL);
    trim(str);
    if (strlen(str) == 0) return 1;
    beforecommand(molcommand,str,SEPARATOR);
    aftercommand(molparams,str,SEPARATOR);
    trim(molcommand);
    trim(molparams);
    upstring (molcommand);
    DEBUG_INIT("molcommand: %s", molcommand);
    DEBUG_INIT("molparams: %s", molparams);
    /* i = index of this molecule, j = index of its next free particle slot */
    i=0;
    while (strcmp(molecules[i].name, molname)) i++;
    j=0;
    while (molecules[i].type[j] != -1) j++;
    if (!strcmp(molcommand,"PARTICLES")) {
        fprintf (stdout, "particle %d: \t", j + 1);
        fields = sscanf(molparams,"%ld %ld %lf",molecules[i].type + j, molecules[i].switchtype + j, molecules[i].delta_mu + j);
        fprintf (stdout, "%ld ",molecules[i].type[j]);
        if (fields == 1){
            /* no switch info given: particle switches to itself, delta_mu 0 */
            (molecules[i].switchtype[j]) = (molecules[i].type[j]);
            (molecules[i].delta_mu[j]) = 0;
            fields = 3;
        } else{
            fprintf(stdout, "(with switchtype: %ld and delta_mu: %lf)", molecules[i].switchtype[j], molecules[i].delta_mu[j]);
        }
        if (fields != 3) {
            fprintf (stderr, "TOPOLOGY ERROR: could not read a pacticle.\n\n");
            return 0;
        }
        fflush(stdout);
        if (molecules[i].type[j] < 0) {
            fprintf (stderr, "TOPOLOGY ERROR: pacticles include negative type.\n\n");
            return 0;
        }
        if (molecules[i].type[j] > MAXT) {
            fprintf (stderr, "TOPOLOGY ERROR: pacticles include type out of range 0-%ld.\n\n",(long)MAXT);
            return 0;
        }
        fprintf (stdout, "\n");
        return 1;
    }
    if (!strcmp(molcommand,"BOND1")) {
        /* harmonic bond between first neighbours: force constant, eq. length */
        fields = sscanf(molparams, "%le %le ", &bondk, &bonddist);
        if (fields < 2) {
            fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for bond1, should be 2.\n\n");
            return 0;
        }
        if (bonddist < 0) {
            fprintf (stderr, "TOPOLOGY ERROR: bonddist cannot be negative: %f \n\n",bonddist);
            return 0;
        }
        topo->chainparam[i].bond1c = bondk;
        topo->chainparam[i].bond1eq = bonddist;
        fprintf (stdout, "bond1: %f %f \n",topo->chainparam[i].bond1c,topo->chainparam[i].bond1eq);
        return 1;
    }
    if (!strcmp(molcommand,"BOND2")) {
        /* harmonic bond between second neighbours */
        fields = sscanf(molparams, "%le %le ", &bondk, &bonddist);
        if (fields < 2) {
            fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for bond2, should be 2.\n\n");
            return 0;
        }
        if (bonddist < 0) {
            fprintf (stderr, "TOPOLOGY ERROR: bonddist cannot be negative: %f \n\n",bonddist);
            return 0;
        }
        topo->chainparam[i].bond2c = bondk;
        topo->chainparam[i].bond2eq = bonddist;
        fprintf (stdout, "bond2: %f %f \n",topo->chainparam[i].bond2c,topo->chainparam[i].bond2eq);
        return 1;
    }
    if (!strcmp(molcommand,"BONDD")) {
        fields = sscanf(molparams, "%le %le ", &bondk, &bonddist);
        if (fields < 2) {
            fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for bondd, should be 2.\n\n");
            return 0;
        }
        if (bonddist < 0) {
            fprintf (stderr, "TOPOLOGY ERROR: bonddist cannot be negative: %f \n\n",bonddist);
            return 0;
        }
        topo->chainparam[i].bonddc = bondk;
        topo->chainparam[i].bonddeq = bonddist;
        fprintf (stdout, "bondd: %f %f \n",topo->chainparam[i].bonddc,topo->chainparam[i].bonddeq);
        return 1;
    }
    if (!strcmp(molcommand,"ANGLE1")) {
        /* angle potential; equilibrium angle given in degrees, stored in rad */
        fields = sscanf(molparams, "%le %le ", &bondk, &bonddist);
        if (fields < 2) {
            fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for angle1, should be 2.\n\n");
            return 0;
        }
        if (bonddist < 0) {
            fprintf (stderr, "TOPOLOGY ERROR: equilibrium angle cannot be negative: %f \n\n",bonddist);
            return 0;
        }
        topo->chainparam[i].angle1c = bondk;
        topo->chainparam[i].angle1eq = bonddist/180.0*PI;
        fprintf (stdout, "angle1: %f %f \n",topo->chainparam[i].angle1c,topo->chainparam[i].angle1eq);
        return 1;
    }
    if (!strcmp(molcommand,"ANGLE2")) {
        fields = sscanf(molparams, "%le %le ", &bondk, &bonddist);
        if (fields < 2) {
            fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for angle2, should be 2.\n\n");
            return 0;
        }
        if (bonddist < 0) {
            fprintf (stderr, "TOPOLOGY ERROR: equilibrium angle cannot be negative: %f \n\n",bonddist);
            return 0;
        }
        topo->chainparam[i].angle2c = bondk;
        topo->chainparam[i].angle2eq = bonddist/180.0*PI;
        fprintf (stdout, "angle2: %f %f \n",topo->chainparam[i].angle2c,topo->chainparam[i].angle2eq);
        return 1;
    }
    fprintf (stderr, "TOPOLOGY ERROR: unknown parameter: %s.\n\n",molcommand);
    return 0;
}

/* Converts the geometrical type string into a number */
/* Returns the geotype constant for a known keyword, 0 for unknown. */
int convert_geotype(char * geotype){
    if (strcmp(geotype, "CPSC") == 0) return CPSC;
    if (strcmp(geotype, "CHCPSC") == 0) return CHCPSC;
    if (strcmp(geotype, "SCA") == 0) return SCA;
    if (strcmp(geotype, "PSC") == 0) return PSC;
    if (strcmp(geotype, "CHPSC") == 0) return CHPSC;
    if (strcmp(geotype, "TCPSC") == 0) return TCPSC;
    if (strcmp(geotype, "TCHCPSC") == 0) return TCHCPSC;
    if (strcmp(geotype, "TPSC") == 0) return TPSC;
    if (strcmp(geotype, "TCHPSC") == 0) return TCHPSC;
    if (strcmp(geotype, "SPN") == 0) return SPN;
    if (strcmp(geotype, "SPA") == 0) return SPA;
    return 0;
}

/*filling the parameters of external potentail - wall. Returns 1 on succes.*/
/* Parses "[EXTER] : thickness [epsilon [attraction]]"; zero fields means
   no external potential. */
int fillexter(char **pline, struct topo * topo)
{
    int fields;
    double param[3];
    /* 0: thickness
     * 1: epsilon
     * 2: attraction
     */
    char typestr[STRLEN], paramstr[STRLEN];
    void trim (char *);
    void beforecommand(char *, char *, char);
    void aftercommand(char *, char *, char);

    beforecommand(typestr, *pline, SEPARATOR);
    aftercommand(paramstr, *pline, SEPARATOR);
    fields = sscanf(paramstr, "%le %le %le", &param[0], &param[1], &param[2]);
    if (fields >3) {
        fprintf (stderr, "TOPOLOGY ERROR: too many parameters for external potential. We have \
thickness, epsilon, and attraction distance so far.\n\n");
        return 0;
    }
    if (fields >0) {
        topo->exter.exist = TRUE;
        topo->exter.thickness = param[0];
        fprintf(stdout, "External potential with thickness: %le ",topo->exter.thickness);
        if (fields >1) {
            topo->exter.epsilon = param[1];
            fprintf(stdout, "epsilon: %le ",topo->exter.epsilon);
            if (fields >2) {
                topo->exter.attraction = param[2];
                fprintf(stdout, "and range of attraction: %le ",topo->exter.attraction);
            }
        }
    } else{
        topo->exter.exist = FALSE;
        fprintf(stdout, "No external potential ");
    }
    fprintf(stdout, " \n");
    DEBUG_INIT("Finished filling external potential");
    return 1;
}

/*filling pair for which we exlude attraction interaction. Returns 1 on succes.*/
/* Reads pairs of type numbers from the line and marks both [a][b] and
   [b][a] as excluded; an odd number of types is an error. */
int fillexclusions(char **pline, BOOL (*exlusions)[MAXT][MAXT])
{
    long num1,num2;
    char *pline1, *pline2;
    void trim (char *);

    num1 = strtol(*pline, &pline2, 10);
    trim(pline2);
    if ((int)strlen(pline2) > 0) {
        num2 = strtol(pline2, &pline1, 10);
        trim(pline1);
        (*exlusions)[num1][num2]=TRUE;
        (*exlusions)[num2][num1]=TRUE;
        fprintf(stderr, "Exclusions %ld %ld \n", num1, num2);
    } else {
        fprintf(stderr, "Error in readin Topology exclusions, probably there is not even number of types \n");
        return 0;
    }
    while ((int)strlen(pline1) > 0) {
        num1 = strtol(pline1, &pline2, 10);
        trim(pline2);
        if ((int)strlen(pline2) > 0) {
            num2 = strtol(pline2, &pline1, 10);
            trim(pline1);
            (*exlusions)[num1][num2]=TRUE;
            (*exlusions)[num2][num1]=TRUE;
            fprintf(stderr, "Exclusions %ld %ld \n", num1, num2);
        } else {
            fprintf(stderr, "Error in readin Topology exclusions, probably there is not even number of types \n");
            return 0;
        }
    }
    return 1;
}

/*filing the parameters for types from given strings.
Returns 1 on succes.*/ int filltypes(char **pline, struct topo * topo) { int type; int geotype_i; int fields; char name[SMSTR]; char geotype[SMSTR]; double param[11]; /* 0: epsilon * 1: sigma * 2: attraction dist * 3: sttraction switch * 4: patch angle * 5: patch switch * 6: length * 7(optional): second patche rotation * 8(optional): second patch angle * 9(optional): second patch angle switch * +1: chirality */ char typestr[STRLEN], paramstr[STRLEN]; void trim (char *); void beforecommand(char *, char *, char); void aftercommand(char *, char *, char); beforecommand(typestr, *pline, SEPARATOR); aftercommand(paramstr, *pline, SEPARATOR); fields = sscanf(paramstr, "%s %d %s %le %le %le %le %le %le %le %le %le %le %le", name, &type, geotype, &param[0], &param[1], &param[2], &param[3], &param[4], &param[5], &param[6], &param[7], &param[8], &param[9], &param[10]); fields -= 5; // number of parameter fields => I am too lazy to adjust everywhere below the numbers //DEBUG fprintf (stdout, "Topology read geotype: %ld with parameters fields %d, str:%s and %s in pline %s\n",geotype,fields,geotypestr,paramstr,pline); geotype_i = convert_geotype(geotype); if(!geotype_i){ fprintf(stderr, "TOPOLOGY ERROR: Unknown GEOTYPE: %s!", geotype); return 0; } DEBUG_INIT("geotype_i: %d; fields = %d", geotype_i, fields); if (( (geotype_i == SCN) || (geotype_i == SPN) ) && (fields != 0)) { fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 1.\n\n", geotype); return 0; } if (( (geotype_i == SCA) || (geotype_i == SPA)) && (fields != 2)) { fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 3.\n\n", geotype); return 0; } if (( (geotype_i == PSC) || (geotype_i == CPSC) ) && (fields != 5)) { fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 5.\n\n", geotype); return 0; } if (( (geotype_i == CHCPSC) || (geotype_i == CHCPSC) )&& ( fields != 6)) { fprintf (stderr, "TOPOLOGY ERROR: wrong number 
of parameters for %s geotype, should be 6.\n\n", geotype); return 0; } if (( (geotype_i == TPSC) || (geotype_i == TCPSC) ) && (fields != 8)) { fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 8.\n\n", geotype); return 0; } if (( (geotype_i == TCHCPSC) || (geotype_i == TCHCPSC) )&& ( fields != 9)) { fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 9.\n\n", geotype); return 0; } if ((geotype_i < 0) || (geotype_i > (MAXT + 10))) { fprintf (stderr, "TOPOLOGY ERROR: geotype (%s) is out of range: 0 - %d.\n\n", geotype, MAXT + 10); return 0; } strcpy(topo->ia_params[type][type].name, name); strcpy(topo->ia_params[type][type].other_name, name); topo->ia_params[type][type].geotype[0] = geotype_i; topo->ia_params[type][type].geotype[1] = geotype_i; topo->ia_params[type][type].epsilon = param[0]; topo->ia_params[type][type].sigma = param[1]; topo->ia_params[type][type].rcutwca = (topo->ia_params[type][type].sigma)*pow(2.0,1.0/6.0); fprintf(stdout, "Topology read of %d: %s (geotype: %s, %d) with parameters %lf %lf", type, name, geotype, geotype_i, topo->ia_params[type][type].epsilon, topo->ia_params[type][type].sigma); if (fields > 0) { topo->ia_params[type][type].pdis = param[2]; topo->ia_params[type][type].pswitch = param[3]; topo->ia_params[type][type].rcut = topo->ia_params[type][type].pswitch+topo->ia_params[type][type].pdis; fprintf(stdout, " %f %f",topo->ia_params[type][type].pdis,topo->ia_params[type][type].pswitch); } if (fields > 2) { int i; for(i = 0; i < 2; i++){ topo->ia_params[type][type].len[i] = param[6]; topo->ia_params[type][type].half_len[i] = param[6] / 2; topo->ia_params[type][type].pangl[i] = param[4]; topo->ia_params[type][type].panglsw[i] = param[5]; topo->ia_params[type][type].pcangl[i] = cos(param[4]/2.0/180*PI); // C1 topo->ia_params[type][type].pcanglsw[i] = cos((param[4]/2.0+param[5])/180*PI); // C2 //topo->ia_params[type][type].pcangl[i] = 
topo->ia_params[type][type].pcangl[i]; //topo->ia_params[type][type].pcanglsw[i] = topo->ia_params[type][type].pcanglsw[i]; topo->ia_params[type][type].pcoshalfi[i] = cos((param[4]/2.0+param[5])/2.0/180*PI); topo->ia_params[type][type].psinhalfi[i] = sqrt(1.0 - topo->ia_params[type][type].pcoshalfi[i] * topo->ia_params[type][type].pcoshalfi[i]); } fprintf(stdout, " %f %f", topo->ia_params[type][type].pangl[0], topo->ia_params[type][type].panglsw[0]); } if(fields == 6){ int i; for(i = 0; i < 2; i++){ topo->ia_params[type][type].chiral_cos[i] = cos(param[7] / 360 * PI); topo->ia_params[type][type].chiral_sin[i] = sqrt(1 - topo->ia_params[type][type].chiral_cos[i] * topo->ia_params[type][type].chiral_cos[i]); fprintf(stdout, " %f ", param[7]); } } if ((fields == 8)||(fields == 9)) { int i; for(i = 0; i < 2; i++){ topo->ia_params[type][type].csecpatchrot[i] = cos(param[7] / 360 * PI); topo->ia_params[type][type].ssecpatchrot[i] = sqrt(1 - topo->ia_params[type][type].csecpatchrot[i] * topo->ia_params[type][type].csecpatchrot[i]); //fprintf(stdout, " %f %f", topo->ia_params[type][type].csecpatchrot[0], topo->ia_params[type][type].ssecpatchrot[0]); topo->ia_params[type][type].pangl[i+2] = param[8]; topo->ia_params[type][type].panglsw[i+2] = param[9]; topo->ia_params[type][type].pcangl[i+2] = cos(param[8]/2.0/180*PI); // C1 topo->ia_params[type][type].pcanglsw[i+2] = cos((param[8]/2.0+param[9])/180*PI); // C2 //topo->ia_params[type][type].pcangl[i] = topo->ia_params[type][type].pcangl[i]; //topo->ia_params[type][type].pcanglsw[i] = topo->ia_params[type][type].pcanglsw[i]; topo->ia_params[type][type].pcoshalfi[i+2] = cos((param[8]/2.0+param[9])/2.0/180*PI); topo->ia_params[type][type].psinhalfi[i+2] = sqrt(1.0 - topo->ia_params[type][type].pcoshalfi[i+2] * topo->ia_params[type][type].pcoshalfi[i+2]); } fprintf(stdout, " %f %f %f", param[7], topo->ia_params[type][type].pangl[2], topo->ia_params[type][type].panglsw[2]); } if(fields == 9){ int i; for(i = 0; i < 2; i++){ 
topo->ia_params[type][type].chiral_cos[i] = cos(param[10] / 360 * PI); topo->ia_params[type][type].chiral_sin[i] = sqrt(1 - topo->ia_params[type][type].chiral_cos[i] * topo->ia_params[type][type].chiral_cos[i]); fprintf(stdout, " %f ", param[9]); } } // Volume if (geotype_i < SP) topo->ia_params[type][type].volume = 4.0/3.0*PI*pow((topo->ia_params[type][type].sigma)/2.0,3.0) + PI/2.0*topo->ia_params[type][type].len[0]*pow((topo->ia_params[type][type].sigma)/2.0,2.0) ; else topo->ia_params[type][type].volume = 4.0/3.0*PI*pow((topo->ia_params[type][type].sigma)/2.0,3.0); if ( topo->ia_params[type][type].rcutwca > topo->sqmaxcut ) topo->sqmaxcut = topo->ia_params[type][type].rcutwca; if ( topo->ia_params[type][type].rcut > topo->sqmaxcut ) topo->sqmaxcut = topo->ia_params[type][type].rcut; fprintf(stdout, " \n"); DEBUG_INIT("Finished filltypes"); return 1; } /************************************************ * String Manipulation stuff for parsing files ************************************************/ /* return string that goes before comand character*/ void beforecommand(char *str,char *pline,char commandc) { char *dummy; void trim(char *); strcpy(str,pline); if ((dummy = strchr (str,commandc)) != NULL) (*dummy) = 0; trim (str); } /* return string that goes after command character */ void aftercommand(char *str, char *pline,char commandc) { char *dummy; int i; void trim(char *); strcpy(str,pline); if ((dummy = strchr (str,commandc)) != NULL) { i=0; while( (*dummy) != str[i]) { str[i] = ' '; i++; } str[i] = ' '; } trim (str); } /* reads a string from stream of max length n */ char *fgets2(char *line, int n, FILE *stream) { char *c; if (fgets(line,n,stream)==NULL) { return NULL; } if ((c=strchr(line,'\n'))!=NULL) *c=0; return line; } /* remove comments */ void strip_comment (char *line) { char *c; if (!line) return; /* search for a comment mark and replace it by a zero */ if ((c = strchr(line,COMMENTSIGN)) != NULL) (*c) = 0; } /*test is there is still something left in 
string*/ int continuing(char *s) { int sl; void rtrim (char *str); rtrim(s); sl = strlen(s); if ((sl > 0) && (s[sl-1] == CONTINUE)) { s[sl-1] = 0; return 1; /*true*/ } else return 0; /*false*/ } /*make strin uppercase*/ void upstring (char *str) { int i; for (i=0; (i < (int)strlen(str)); i++) str[i] = toupper(str[i]); } /*trim string from left*/ void ltrim (char *str) { char *tr; int c; if (!str) return; tr = strdup (str); c = 0; while ((tr[c] == ' ') || (tr[c] == '\n') || (tr[c] == '\t')) c++; strcpy (str,tr+c); free (tr); } /*trim string from right*/ void rtrim (char *str) { int nul; if (!str) return; nul = strlen(str)-1; while ((nul > 0) && ((str[nul] == ' ') || (str[nul] == '\t') || (str[nul] == '\n')) ) { str[nul] = '\0'; nul--; } } /*trim strin from left and right*/ void trim (char *str) { void ltrim (char *str); void rtrim (char *str); ltrim (str); rtrim (str); } /** * Dumps a configuration to the supplied file handle. */ void draw(FILE *outfile, /*struct vector box, long npart, struct particles *particle,*/ struct conf * conf, struct topo * topo) { long i; double anint(double); //fprintf (outfile, "%15.8le %15.8le %15.8le\n", box.x, box.y, box.z); #ifdef TESTING for (i = 0; i < topo->npart; i++) { fprintf (outfile, "%15.6le %15.6le %15.6le %15.6le %15.6le %15.6le %15.6le %15.6le %15.6le %d\n", conf->box.x * ((conf->particle[i].pos.x) - anint(conf->particle[i].pos.x)), conf->box.y * ((conf->particle[i].pos.y) - anint(conf->particle[i].pos.y)), conf->box.z * ((conf->particle[i].pos.z) - anint(conf->particle[i].pos.z)), conf->particle[i].dir.x, conf->particle[i].dir.y, conf->particle[i].dir.z, conf->particle[i].patchdir[0].x, conf->particle[i].patchdir[0].y, conf->particle[i].patchdir[0].z, conf->particle[i].switched); } #else for (i = 0; i < topo->npart; i++) { fprintf (outfile, "%15.8le %15.8le %15.8le %15.8le %15.8le %15.8le %15.8le %15.8le %15.8le %d\n", conf->box.x * ((conf->particle[i].pos.x) - anint(conf->particle[i].pos.x)), conf->box.y * 
((conf->particle[i].pos.y) - anint(conf->particle[i].pos.y)), conf->box.z * ((conf->particle[i].pos.z) - anint(conf->particle[i].pos.z)), conf->particle[i].dir.x, conf->particle[i].dir.y, conf->particle[i].dir.z, conf->particle[i].patchdir[0].x, conf->particle[i].patchdir[0].y, conf->particle[i].patchdir[0].z, conf->particle[i].switched);
    }
#endif
}

/*............................................................................*/

/****************************************************************************/
/* Pairlist stuf                                                            */
/****************************************************************************/

/**
 * Initializes the pairlist and allocates memory
 */
void init_pairlist(struct topo * topo, struct sim * sim){
    printf("\nAllocating memory for pairlist...\n");
    sim->pairlist = xmalloc(sizeof(struct pairs) * topo->npart);
    // Highest guess: Every particle interacts with the others
    // TODO: Make it more sophisticated
    long i;
    for(i = 0; i < topo->npart; i++){
        /* worst case: every particle can pair with every other particle */
        sim->pairlist[i].pairs = malloc(sizeof(long) * topo->npart);
        sim->pairlist[i].num_pairs = 0;
    }
}

/*............................................................................*/

/**
 * Cleans up: deallocates the memory for the pairlist
 */
int dealloc_pairlist(struct topo * topo, struct sim * sim){
    long i;
    if(sim->pairlist != NULL){
        for(i = 0; i < topo->npart; i++){
            if(sim->pairlist[i].pairs != NULL){
                free(sim->pairlist[i].pairs);
            }
        }
        free(sim->pairlist);
    }
    return 0;
}

/*............................................................................*/

/**
 * Generates a pairlist with a very basic alogrithm
 * (O(N^2) over all pairs; stores pair j for i and i for j when their
 * minimum-image center distance is within maxcut plus a skin that scales
 * with the translational step sizes and pairlist_update).
 */
void gen_simple_pairlist(struct topo * topo, struct sim * sim, struct conf * conf){
    struct vector r_cm;
    double r_cm2;
    double max_dist;
    // Set the pairlist to zero
    //DEBUG_INIT("Gen Pairlist")
    long i, j;
    for(i = 0; i < topo->npart; i++){
        //DEBUG_INIT("%ld", i);
        sim->pairlist[i].num_pairs = 0;
    }
    long nj = topo->npart;
    long ni = nj - 1;
    for(i = 0; i < ni; i++){
        for(j = i + 1; j < nj; j++){
            r_cm.x = conf->particle[i].pos.x - conf->particle[j].pos.x;
            r_cm.y = conf->particle[i].pos.y - conf->particle[j].pos.y;
            r_cm.z = conf->particle[i].pos.z - conf->particle[j].pos.z;
            /* minimum-image convention on fractional coordinates, then scale to box units */
            if ( r_cm.x < 0 ) r_cm.x = conf->box.x * (r_cm.x - (double)( (long)(r_cm.x-0.5) ) );
            else r_cm.x = conf->box.x * (r_cm.x - (double)( (long)(r_cm.x+0.5) ) );
            if ( r_cm.y < 0 ) r_cm.y = conf->box.y * (r_cm.y - (double)( (long)(r_cm.y-0.5) ) );
            else r_cm.y = conf->box.y * (r_cm.y - (double)( (long)(r_cm.y+0.5) ) );
            if ( r_cm.z < 0 ) r_cm.z = conf->box.z * (r_cm.z - (double)( (long)(r_cm.z-0.5) ) );
            else r_cm.z = conf->box.z * (r_cm.z - (double)( (long)(r_cm.z+0.5) ) );
            r_cm2 = DOT(r_cm,r_cm);
            /* skin: average translational displacement of the two types,
               scaled by the number of sweeps between pairlist updates */
            max_dist = AVER(sim->trans[conf->particle[i].type].mx, sim->trans[conf->particle[j].type].mx);
            max_dist *= (1 + sim->pairlist_update) * 2;
            max_dist += topo->maxcut;
            max_dist *= max_dist; /* squared */
            if (r_cm2 <= max_dist){
                sim->pairlist[i].pairs[sim->pairlist[i].num_pairs++] = j;
                sim->pairlist[j].pairs[sim->pairlist[j].num_pairs++] = i;
            }
        }
    }
    ////Check for too many pairs
    //for(i = 0; i < topo->npart; i++){
    //    //if (sim->pairlist.list[i].num_pairs >= topo->npart)
    //    if (sim->pairlist[i].num_pairs >= topo->npart){
    //        fprintf(stderr, "ERROR: Too many pairs for particle %ld!!!\n", i);
    //        exit(1);
    //    }
    //}
}

/*.............................................................................*/

/**
 * Interface for the generation of the pairlist. Define other pairlist
 * algorithms above.
*/
void gen_pairlist(struct topo * topo, struct sim * sim, struct conf * conf){
    gen_simple_pairlist(topo, sim, conf);
}

/*.............................................................................*/

/**
 * Print out the pairlist
 */
void print_pairlist(FILE * stream, struct sim * sim, struct topo * topo){
    long i, j;
    for (i = 0; i < topo->npart; i++){
        fprintf(stream, "%ld (%ld):", i, sim->pairlist[i].num_pairs);
        for(j = 0; j < sim->pairlist[i].num_pairs; j++){
            fprintf(stream, " %ld", sim->pairlist[i].pairs[j]);
        }
        fprintf(stream, "\n");
    }
}

/*..........................................................................*/

/****************************************************************************/
/* Cluster statistics stuf                                                  */
/****************************************************************************/

/**
 * determines, wheter two particles are in the same cluster
 * Returns TRUE if fst and snd are directly bonded neighbours in a chain,
 * or if their pair interaction energy is attractive (paire < -0.10).
 */
int same_cluster(struct topo * topo, struct conf * conf, long fst, long snd, double (* intfce[MAXT][MAXT])(struct interacts *) ){
    /*if two particles are bonded they belong to the same cluster*/
    if ( ((topo->chainparam[conf->particle[fst].chaint]).bond1c >= 0) ||
         ((topo->chainparam[conf->particle[fst].chaint]).bonddc >= 0) ){
        if ( (snd == topo->conlist[fst][1]) || (snd == topo->conlist[fst][0]) ) {
            return TRUE;
        }
    }
    if ( ((topo->chainparam[conf->particle[snd].chaint]).bond1c >= 0) ||
         ((topo->chainparam[conf->particle[snd].chaint]).bonddc >= 0) ){
        if ( (fst == topo->conlist[snd][1]) || (fst == topo->conlist[snd][0]) ) {
            return TRUE;
        }
    }
    /*cluster is made of particles closer tna some distance*/
    /* struct vector image(struct vector r1, struct vector r2, struct vector box);
       struct vector r_cm = image(conf->particle[fst].pos, conf->particle[snd].pos, conf->box);
       double dist2 = DOT(r_cm, r_cm);
     * TODO: Make it much more efficient => define cluster_dist!!!
     * if(dist2 > topo->ia_params[conf->particle[fst].type][conf->particle[snd].type].sigma * topo->ia_params[conf->particle[fst].type][conf->particle[snd].type].sigma*4.0){ return FALSE; } else { return TRUE; }*/
    /*cluster is made of attractively interacting particles*/
    double paire(long, long, double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo, struct conf * conf);
    if(paire(fst, snd, intfce, topo, conf) > -0.10 ){
        return FALSE;
    } else {
        return TRUE;
    }
}

/*............................................................................*/

/**
 * generate the clusterlist
 * Union-find-like sweep: clusterlist[i] converges to the smallest particle
 * index in i's cluster. Iterates until no assignment changes.
 */
int gen_clusterlist(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)){
    int change = TRUE; /* does it still change? */
    //long neighbour;
    long i, j, fst, snd, tmp, minnumber, maxnumber;
    int same_cluster(struct topo * topo, struct conf * conf, long fst, long snd, double (* intfce[MAXT][MAXT])(struct interacts *));
    // Set clusterindex to the corresponding index
    for( i = 0; i < topo->npart; i++){
        sim->clusterlist[i] = i;
    }
    // Start determining the cluster
    while(change){
        change = FALSE;
        for(i = 0; i < topo->npart; i++){
            /*If nore pairlist go over all pairs*/
            /* NOTE(review): without a pairlist, j starts at i, so the
               self-pair (i,i) is tested once; harmless (equal cluster ids)
               but wasted work — confirm before tightening to i+1. */
            maxnumber = topo->npart;
            minnumber = i ;
            if (sim->pairlist_update) {
                maxnumber = sim->pairlist[i].num_pairs;
                minnumber=0;
            }
            /* Go over pairs to see if they are in the cluster */
            for(j = minnumber; j < maxnumber; j++){
                fst = i;
                snd = j;
                if (sim->pairlist_update) {
                    snd = sim->pairlist[i].pairs[j];
                }
                /*do cluster analysis only for spherocylinders*/
                if ( (topo->ia_params[conf->particle[fst].type][conf->particle[snd].type].geotype[0] < SP) &&
                     (topo->ia_params[conf->particle[fst].type][conf->particle[snd].type].geotype[1] < SP) ) {
                    /* if they are close to each other */
                    if(same_cluster(topo, conf, fst, snd, intfce)){
                        if(fst > snd){
                            tmp = snd;
                            snd = fst;
                            fst = tmp;
                        }
                        if(sim->clusterlist[fst] < sim->clusterlist[snd]){
                            sim->clusterlist[snd] = sim->clusterlist[fst];
                            change = TRUE;
                            break;
                            /*
=> will eventually start the i loop from new */
                        }
                        if(sim->clusterlist[snd] < sim->clusterlist[fst]){
                            sim->clusterlist[fst] = sim->clusterlist[snd];
                            change = TRUE;
                            break; /* => will eventually start the i loop from new */
                        }
                    }
                }
            }
            if(change){
                break;
            }
        }
    }
    return 0;
}

/*............................................................................*/

/**
 * sort the clusterlist
 * Converts clusterlist (smallest-member-index labels) into sim->clusters
 * (explicit member lists), records num_cluster, max_clust and the
 * cluster-size histogram in clusterstat.
 */
int sort_clusterlist(struct topo * topo, struct sim * sim){
    long cluster_indices[topo->npart];   /* holds the different cluster indices. (currently too much memory) */
    long num_cluster = 0;                /* number of clusters, temporary needed */
    long i, j;
    /* how many clusters are there? */
    /* representative label of a cluster is its smallest member index, which
       is encountered first in an ascending scan — hence the max_index trick */
    long max_index = -1;
    for(i = 0; i < topo->npart; i++){
        if(max_index < sim->clusterlist[i]){
            max_index = sim->clusterlist[i];
            cluster_indices[num_cluster++] = max_index;
        }
    }
    /* free the memory from the old clusters */
    if(sim->clusters){
        for(i = 0; i < sim->num_cluster; i++){
            if(sim->clusters[i].particles){
                free(sim->clusters[i].particles);
            }
        }
        free(sim->clusters);
    }
    /* Allocate memory for the clusters */
    sim->clusters = xmalloc(sizeof(struct cluster) * num_cluster);
    for(i = 0; i < num_cluster; i++){
        /* allocate maximal space for all the clusters */
        sim->clusters[i].particles = xmalloc(sizeof(long) * topo->npart);
        sim->clusters[i].npart = 0;
    }
    /* fill in the particles belonging to one cluster */
    for(i = 0; i < num_cluster; i++){
        for(j = 0; j < topo->npart; j++){
            if(sim->clusterlist[j] == cluster_indices[i]){
                sim->clusters[i].particles[sim->clusters[i].npart++] = j;
            }
        }
    }
    sim->num_cluster = num_cluster;
    /* Find the biggest size */
    sim->max_clust = 0;
    for(i = 0; i < num_cluster; i++){
        if(sim->clusters[i].npart > sim->max_clust){
            sim->max_clust = sim->clusters[i].npart;
        }
    }
    /* Set the statistics to zero */
    /* NOTE(review): clusterstat is re-allocated on every call without
       freeing the previous array — looks like a leak when called per sweep;
       confirm against the allocator/ownership conventions elsewhere. */
    sim->clusterstat = xmalloc(sizeof(long) * sim->max_clust);
    for(i = 0; i < sim->max_clust; i++){
        sim->clusterstat[i] = 0;
    }
    /* Do the statistics */
    for(i = 0; i < num_cluster; i++){
        sim->clusterstat[sim->clusters[i].npart -
1]++;
    }
    return 0;
}

/*............................................................................*/

/**
 * calculate energies of clusters
 * Sums the pair energy over all internal pairs of each cluster into
 * sim->clustersenergy[i].
 */
int calc_clusterenergies(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)){
    long i,j,k;
    double paire(long, long, double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo, struct conf * conf);
    for(i = 0; i < sim->num_cluster; i++){
        sim->clustersenergy[i]=0.0;
        for(j = 0; j < sim->clusters[i].npart; j++){
            for(k = j+1; k < sim->clusters[i].npart; k++){
                sim->clustersenergy[i]+= paire(sim->clusters[i].particles[j], sim->clusters[i].particles[k], intfce, topo, conf);
            }
        }
    }
    return 0;
}

/*............................................................................*/

/**
 * print the clusterlist
 * One line per particle: index, cluster id (both 1-based) and position.
 */
int print_clusterlist(FILE * stream, BOOL decor, struct topo * topo, struct sim * sim, struct conf * conf){
    long i;
    if(decor){
        fprintf(stream, "\n"
                "-----------------------------------------------------\n"
                " The Cluster List\n"
                " (Index starts with 1)\n"
                "-----------------------------------------------------\n");
    }
    for(i = 0; i < topo->npart; i++){
        fprintf(stream,"%3ld %3ld %8.4lf %8.4lf %8.4lf", i + 1, sim->clusterlist[i] + 1, conf->particle[i].pos.x, conf->particle[i].pos.y, conf->particle[i].pos.z);
        fprintf(stream,"\n");
    }
    if(decor){
        fprintf(stream,"-----------------------------------------------------\n");
    }
    fflush(stream);
    return 0;
}

/*............................................................................*/

/**
 * print the clusters
 * One line per cluster: index (1-based), energy, then its members.
 */
int print_clusters(FILE * stream, BOOL decor, struct sim * sim){
    long i, j;
    if(decor){
        fprintf(stream, "\n"
                "-----------------------------------------------------\n"
                " The Clusters\n"
                " (Index starts with 1)\n"
                "-----------------------------------------------------\n");
    }
    for(i = 0; i < sim->num_cluster; i++){
        fprintf(stream, "%3ld(%f):", i + 1,sim->clustersenergy[i]);
        for(j = 0; j < sim->clusters[i].npart; j++){
fprintf(stream, "%5ld", sim->clusters[i].particles[j] + 1); } fprintf(stream, "\n"); } if(decor){ fprintf(stream,"---------------------------------------------------\n"); } fflush(stream); return 0; } /*............................................................................*/ /** * print a statistics for the clusters */ int print_clusterstat(FILE * stream, BOOL decor, struct sim * sim){ long i; if(decor){ fprintf(stream, "\n" "-----------------------------------------------------\n" " Cluster Distribution\n" "-----------------------------------------------------\n"); } for(i = 0; i < sim->max_clust; i++){ fprintf(stream, "%5ld\t%5ld\n", i + 1, sim->clusterstat[i]); } if(decor){ fprintf(stream, "--------------------------------------------------\n"); } fflush(stream); return 0; } /*............................................................................*/ /** * Alternative way of printing the cluster statistics: everything is on * one line. First monomers, then dimers etc. */ int print_clstat_oneline(FILE * stream, long sweep, struct sim * sim){ long i; fprintf(stream, "%ld: ", sweep); for(i = 0; i < sim->max_clust; i++){ fprintf(stream, "%5ld\t", sim->clusterstat[i]); } fprintf(stream, "\n"); fflush(stream); return 0; } /** * write out all the cluster stat in files, if file name is given */ int write_cluster(FILE * cl_stat, FILE * cl, FILE * cl_list, BOOL decor, long sweep, struct sim * sim, struct topo * topo, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)){ int gen_clusterlist(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)); int sort_clusterlist(struct topo * topo, struct sim * sim); int print_clusters(FILE * stream, BOOL decor, struct sim * sim); int calc_clusterenergies(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)); gen_clusterlist(topo, sim, conf, intfce); sort_clusterlist(topo, sim); 
calc_clusterenergies(topo, sim, conf, intfce); if(cl_stat){ if(decor == FALSE){ // if no decor, this means usually into a file. Hence print info // about number of line per frame fprintf(cl_stat, "Sweep: %ld | Maximal size: %ld\n", sweep, sim->max_clust); } print_clusterstat(cl_stat, decor, sim); /* print_clstat_oneline(cl_stat, sweep, sim); */ } if(cl){ if(decor == FALSE){ fprintf(cl, "Sweep: %ld | Number of clusters: %ld\n", sweep, sim->num_cluster); } print_clusters(cl, decor, sim); } if(cl_list){ if(decor == FALSE){ fprintf(cl_list, "Sweep: %ld | Number of particles: %ld\n", sweep, topo->npart); } print_clusterlist(cl, decor, topo, sim, conf); } return 0; } /*............................................................................*/ /****************************************************************************/ /* Wang-Landau stuf */ /****************************************************************************/ /* Initiate Wang-Landau calculation. */ int wlinit(struct wls *wl, char filename[30]) { long i,length,fields=0; double field[5]; FILE *infile; char line[STRLEN]; int wlend(struct wls *); void trim(char *); void strip_comment(char *); infile = fopen(filename, "r"); if (infile == NULL) { fprintf (stderr, "\nERROR: Could not open %s file.\n\n",filename); return 1; } length=0; while (fgets2(line,STRLEN-2,infile) != NULL) { strip_comment (line); trim (line); /* if there is something left... */ if ((int)strlen(line) > 0) { length++; } } length--; /*there is alpha at the first line*/ (*wl).weights = malloc( sizeof(double) * length ); (*wl).hist = malloc( sizeof(long) * length ); (*wl).length[1] = 0; (*wl).dorder[1] = 0; fseek(infile,0,SEEK_SET); i=0; while (fgets2(line,STRLEN-2,infile) != NULL) { strip_comment (line); trim (line); /* if there is something left... 
*/
        if ((int)strlen(line) > 0) {
            if (i == 0) {
                /* first content line: Wang-Landau modification factor alpha */
                if (sscanf(line, "%le",&(*wl).alpha)!= 1) {
                    fprintf (stderr, "ERROR: Could not read alpha at the begining.\n\n");
                    wlend(wl);
                    return 1;
                } else i++;
            } else {
                /* 3 fields = 1D order parameter (order, weight, hist);
                   4 fields = 2D (order1, order2, weight, hist) */
                fields = sscanf(line, "%le %le %le %le",&field[0],&field[1],&field[2],&field[3]);
                if ( fields == 3 ) {
                    if (i==1) (*wl).minorder[0] = field[0];
                    (*wl).weights[i-1] = field[1];
                    (*wl).hist[i-1] = field[2];
                    (*wl).length[0]++;
                    i++;
                } else if (fields == 4 ) {
                    if (i==1) {
                        (*wl).minorder[0] = field[0];
                        (*wl).minorder[1] = field[1];
                    }
                    /* length[0] counts rows within the first order-2 slice */
                    if ( (*wl).minorder[1] == field[1] ) (*wl).length[0]++;
                    (*wl).weights[i-1] = field[2];
                    (*wl).hist[i-1] = field[3];
                    i++;
                } else {
                    /* NOTE(review): error paths in this function return
                       without fclose(infile) — file handle leak; confirm
                       whether wlinit failure aborts the program anyway. */
                    fprintf (stderr, "ERROR: Could not read order parameter at line %ld.\n\n", i);
                    wlend(wl);
                    return 1;
                }
            }
        }
    }
    /* derive grid spacings from first/last values; field[] still holds the
       last parsed line here */
    if (fields == 4 ) {
        (*wl).length[1] = length / (*wl).length[0];
        (*wl).dorder[1] = (field[1] - (*wl).minorder[1])/((*wl).length[1]-1);
    }
    (*wl).dorder[0] = (field[0] - (*wl).minorder[0])/((*wl).length[0]-1);
    /* consistency checks: number of parsed rows must match grid size */
    if ( ( (i-1) != (*wl).length[0] ) && (fields==3) ) {
        fprintf (stderr, "ERROR: In reading order parameters length %ld does not fit number of lines %ld.\n\n", (*wl).length[0],i-1);
        wlend(wl);
        return 1;
    }
    if ( ( (i-1) != (*wl).length[0]*(*wl).length[1] ) && (fields==4) ) {
        fprintf (stderr, "ERROR: In reading order parameters lengths %ld %ld does not fit number of lines %ld.\n\n", (*wl).length[0],(*wl).length[1],i-1);
        wlend(wl);
        return 1;
    }
    /*DEBUG*/
    printf("Wang-Landau method init:\n");
    printf("alpha: %f\n",(*wl).alpha);
    /*int j=0;
    if ((*wl).length[1] == 0) {
        for (i=0; i<(*wl).length[0]; i++) {
            printf ("%15.8le %15.8le %ld \n",(*wl).minorder[0] + i * (*wl).dorder[0], (*wl).weights[i], (*wl).hist[i]);
        }
    } else {
        for (j=0; j<(*wl).length[1]; j++) {
            for (i=0; i<(*wl).length[0]; i++) {
                printf ("%15.8le %15.8le %15.8le %ld \n",(*wl).minorder[0] + i * (*wl).dorder[0], (*wl).minorder[1]+j*(*wl).dorder[1], (*wl).weights[i+(*wl).length[0]*j], (*wl).hist[i+(*wl).length[0]*j]);
            }
            printf (" \n");
        }
    }*/
    fclose(infile);
fflush(stdout); /**/ return 0; } int wlwrite(struct wls *wl, char filename[30]) { long i,j; FILE *outfile; outfile = fopen(filename, "w"); if (outfile == NULL) { fprintf (stderr, "\nERROR: Could not open %s file.\n\n",filename); return 1; } fprintf (outfile, "%15.8le \n",(*wl).alpha); if ((*wl).length[1] == 0) { for (i=0; i<(*wl).length[0]; i++) { fprintf (outfile, "%15.8le %15.8le %ld \n",(*wl).minorder[0] + i * (*wl).dorder[0], (*wl).weights[i], (*wl).hist[i]); } } else { for (j=0; j<(*wl).length[1]; j++) { for (i=0; i<(*wl).length[0]; i++) { fprintf (outfile, "%15.8le %15.8le %15.8le %ld \n",(*wl).minorder[0] + i * (*wl).dorder[0], (*wl).minorder[1]+j*(*wl).dorder[1], (*wl).weights[i+(*wl).length[0]*j], (*wl).hist[i+(*wl).length[0]*j]); } fprintf (outfile, " \n"); } } fflush(outfile); fclose(outfile); return 0; } int wlend(struct wls *wl) { free((*wl).weights); free((*wl).hist); return 0; } void wlreject(struct sim *sim, long oldlength) { int mesh_cpy(struct meshs *, struct meshs *); int longarray_cpy (long **target, long **source,long,long); if ( sim->wlm[0] > 0 ) { sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]] -= sim->wl.alpha; sim->wl.hist[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]++; if ( (sim->wlm[0] == 2) || (sim->wlm[1] == 2) ) mesh_cpy(&sim->wl.mesh,&sim->wl.origmesh); if ( (sim->wlm[0] == 5) || (sim->wlm[1] == 5)||(sim->wlm[0] == 6) || (sim->wlm[1] == 6) ) { longarray_cpy(&sim->wl.radiushole,&sim->wl.radiusholeold,sim->wl.radiusholemax,oldlength); sim->wl.radiusholemax = oldlength; } sim->wl.partincontact = sim->wl.partincontactold; } } void wlaccept(int wlm,struct wls *wl) { int i; if ( wlm > 0 ) { for (i=0;i<2;i++) (*wl).currorder[i] = (*wl).neworder[i]; (*wl).weights[ (*wl).currorder[0] + (*wl).currorder[1] * (*wl).length[0]] -= (*wl).alpha; (*wl).hist[ (*wl).currorder[0] + (*wl).currorder[1] * (*wl).length[0]]++; } } /*..............................................................................*/ 
/*..............................................................................*/
/*........................NAMETIC ORDER.........................................*/
/*..............................................................................*/

/*
   Calculates the instantaneous value of the nematic order parameter for the
   specified configuration.  The nematic director is determined by
   diagonalisation of the tensor order parameter Q (see Allen & Tildesley
   p305).  The order parameter is the corresponding eigenvalue.  However, it
   is equivalent to take minus two times the middle eigenvalue (see Eppenga &
   Frenkel, Mol Phys vol. 52, p.1303-1334 [1984]), and this is more reliable
   for comparing the isotropic phase.  This is the approach taken in this
   implementation.

   Routines from Numerical Recipes are used to perform the diagonalisation.
   Note that these routines expect an n*n matrix to be stored in elements
   [1...n][1...n], rather than [0...n-1][0...n-1], so the arrays must be
   declared with one more element in each dimension.
*/
double nematic(long npart, struct particles *p)
{
    /* 1-based 3x3 Q tensor (Numerical Recipes convention, row/col 0 unused) */
    double q[4][4] = {{0.0, 0.0, 0.0, 0.0},
                      {0.0, 0.0, 0.0, 0.0},
                      {0.0, 0.0, 0.0, 0.0},
                      {0.0, 0.0, 0.0, 0.0}};
    double d[4], e[4];
    long i;
    void tred2(double [4][4], double [4], double [4]);
    void tqli(double [4], double [4]);

    /* accumulate the dyadic sum of direction vectors */
    for (i=0; i<npart; i++) {
        q[1][1] += p[i].dir.x * p[i].dir.x;
        q[1][2] += p[i].dir.x * p[i].dir.y;
        q[1][3] += p[i].dir.x * p[i].dir.z;
        q[2][1] += p[i].dir.y * p[i].dir.x;
        q[2][2] += p[i].dir.y * p[i].dir.y;
        q[2][3] += p[i].dir.y * p[i].dir.z;
        q[3][1] += p[i].dir.z * p[i].dir.x;
        q[3][2] += p[i].dir.z * p[i].dir.y;
        q[3][3] += p[i].dir.z * p[i].dir.z;
    }
    /* Q_ab = (3 <u_a u_b> - delta_ab) / 2 */
    q[1][1] = (q[1][1] * 3.0 / npart - 1.0) / 2.0;
    q[1][2] = (q[1][2] * 3.0 / npart      ) / 2.0;
    q[1][3] = (q[1][3] * 3.0 / npart      ) / 2.0;
    q[2][1] = (q[2][1] * 3.0 / npart      ) / 2.0;
    q[2][2] = (q[2][2] * 3.0 / npart - 1.0) / 2.0;
    q[2][3] = (q[2][3] * 3.0 / npart      ) / 2.0;
    q[3][1] = (q[3][1] * 3.0 / npart      ) / 2.0;
    q[3][2] = (q[3][2] * 3.0 / npart      ) / 2.0;
    q[3][3] = (q[3][3] * 3.0 / npart - 1.0) / 2.0;
    /* Householder reduction + QL diagonalisation (Numerical Recipes) */
    tred2 (q, d, e);
    tqli (d, e);
    /*
   Sort eigenvalues */
    /* three-swap bubble sort of d[1..3]; d[0] is scratch */
    if (d[1] > d[2]) {
        d[0]=d[1];
        d[1]=d[2];
        d[2]=d[0];
    }
    if (d[2] > d[3]) {
        d[0]=d[2];
        d[2]=d[3];
        d[3]=d[0];
    }
    if (d[1] > d[2]) {
        d[0]=d[1];
        d[1]=d[2];
        d[2]=d[0];
    }
    /* order parameter = -2 * middle eigenvalue */
    return -2.0*d[2];
}

/*..............................................................................*/

/*
   Returns the coefficient of the Fourier series term with period
   boxlength/n in the z direction.  The coefficients of the sine and cosine
   terms are added in quadrature and returned, making the result independent
   of phase shifts in the z direction.  A significantly non-zero value
   indicates layering of the particles in the z direction with periodicity
   boxlength/n.
*/
double smectic(long npart, struct particles *p, long n)
{
    double a, b;
    double omega = 8.0*n*atan(1.0); /* 2*pi*n, via atan(1) = pi/4 */
    long i;

    a = b = 0.0;
    for (i=0; i<npart; i++) {
        a += cos(omega * p[i].pos.z);
        b += sin(omega * p[i].pos.z);
    }
    a /= (double)npart;
    b /= (double)npart;
    return sqrt(a*a + b*b);
}

/*..............................................................................*/
/*........................Z ORDER PARAMETER.....................................*/

/* Wang-Landau bin index of the z position of particle 0 relative to the
   system centre of mass, in box units. */
long z_order(struct wls *wl, struct conf * conf,int wli)
{
    // printf("%f %ld\n",particle[0].pos.z * box.z,lround(particle[0].pos.z * box.z / wl.dorder[wli] - wl.minorder[wli]));
    /* Because older C compilators do not know lround we can use ceil as well
    return lround(particle[0].pos.z * box.z / wl.dorder[wli] - wl.minorder[wli]);*/
    /*printf("pos Z %f ",conf->particle[0].pos.z );
    printf("%f ",conf->syscm.z);
    printf("%f ",conf->box.z);
    printf("%f ", wl->minorder[wli]);
    printf("dorder %f \n", wl->dorder[wli] );*/
    return (long) ceil( ((conf->particle[0].pos.z - conf->syscm.z) * conf->box.z- wl->minorder[wli]) / wl->dorder[wli] );
}

/*..............................................................................*/
/*........................2 particles distance.....................................*/

/* Wang-Landau bin index of the in-plane (x,y) distance between particles
   0 and 1 under the minimum-image convention. */
long twopartdist(struct wls *wl, struct conf * conf, int wli)
{
    struct vector r_cm;

    r_cm.x =
conf->particle[0].pos.x - conf->particle[1].pos.x;
    r_cm.y = conf->particle[0].pos.y - conf->particle[1].pos.y;
    r_cm.z = conf->particle[0].pos.z - conf->particle[1].pos.z;
    /* minimum image in fractional coordinates, scaled to box units */
    if ( r_cm.x < 0 ) r_cm.x = conf->box.x * (r_cm.x - (double)( (long)(r_cm.x-0.5) ) );
    else r_cm.x = conf->box.x * (r_cm.x - (double)( (long)(r_cm.x+0.5) ) );
    if ( r_cm.y < 0 ) r_cm.y = conf->box.y * (r_cm.y - (double)( (long)(r_cm.y-0.5) ) );
    else r_cm.y = conf->box.y * (r_cm.y - (double)( (long)(r_cm.y+0.5) ) );
    if ( r_cm.z < 0 ) r_cm.z = conf->box.z * (r_cm.z - (double)( (long)(r_cm.z-0.5) ) );
    else r_cm.z = conf->box.z * (r_cm.z - (double)( (long)(r_cm.z+0.5) ) );
    /* only the x-y projection of the distance enters the order parameter */
    return (long) ceil( (( sqrt(r_cm.x*r_cm.x + r_cm.y*r_cm.y) ) - wl->minorder[wli]) / wl->dorder[wli] );
}

/*..............................................................................*/
/*........................alignment ORDER PARAMETER.....................................*/

/* Sum of dir-dot-products over all particle pairs whose minimum-image
   separation is below 1.5 (box units via image()). */
double alignment_order(struct conf * conf, struct topo * topo)
{
    double sumdot=0;
    long i,j;
    struct vector r_cm;
    struct vector image(struct vector, struct vector, struct vector);

    for (i = 0; i < topo->npart - 1; i++) {
        for (j = i + 1; j < topo->npart; j++) {
            r_cm = image(conf->particle[i].pos, conf->particle[j].pos, conf->box);
            if ( DOT(r_cm,r_cm) < 1.5*1.5 ) {
                sumdot+= DOT(conf->particle[i].dir,conf->particle[j].dir);
            }
        }
    }
    return sumdot;
}

/*..............................................................................*/
/*........................HOLE IN MESH-MEMBRANE ORDER PARAM.....................*/

/* return change in order parameter when one particle moves*/
long meshorder_moveone(struct vector oldpos, struct vector newpos, struct meshs *mesh, long npart, long target, struct conf * conf, struct sim * sim, int wli)
{
    int change;
    int nx,ny,ox,oy; /* position in mesh */
    double resid;
    void mesh_fill(struct meshs *, long , struct particles *, struct sim * sim);
    int mesh_findholes(struct meshs *);
    int mesh_addpart(double, double, int **, int [2]);
    int
mesh_removepart(double, double, int **, int [2]);

    /* only particles of the watched Wang-Landau type affect the mesh */
    if ( conf->particle[target].type != sim->wl.wlmtype )
        return sim->wl.currorder[wli];
    nx = (int) (INBOX(newpos.x,resid) * (*mesh).dim[0]);
    ny = (int) (INBOX(newpos.y,resid) * (*mesh).dim[1]);
    ox = (int) (INBOX(oldpos.x,resid) * (*mesh).dim[0]);
    oy = (int) (INBOX(oldpos.y,resid) * (*mesh).dim[1]);
    if ( (nx == ox) && (ny == oy) ) return sim->wl.currorder[wli]; /* particle stayed in the same mesh bin*/
    /* incremental update; a nonzero `change` means a hole may have
       opened/closed, forcing a full recount below */
    change = mesh_addpart(newpos.x,newpos.y,&(*mesh).data,(*mesh).dim);
    if (change) {
        change = mesh_removepart(oldpos.x,oldpos.y,&(*mesh).data,(*mesh).dim);
    }
    if ( !change ) {
        /* fill the mesh with particles*/
        mesh_fill(mesh,npart,conf->particle, sim);
        return (long) (mesh_findholes(mesh) - sim->wl.minorder[wli]);
    }
    return sim->wl.currorder[wli];
}

/* return change in order parameter when chain moves*/
long meshorder_movechain(long chain[MAXN], struct meshs *mesh, long npart, struct conf * conf, struct sim * sim,struct particles chorig[MAXCHL], int wli)
{
    long i,current;
    int change;
    void mesh_fill(struct meshs *, long , struct particles *, struct sim * sim);
    int mesh_findholes(struct meshs *);
    int mesh_addpart(double, double, int **, int [2]);
    int mesh_removepart(double, double, int **, int [2]);

    change= 1;
    /* add the chain's new positions; chain[] is terminated by a negative index */
    i = 0;
    current = chain[0];
    while ( (current >=0 ) && (change) ) {
        if ( conf->particle[current].type == sim->wl.wlmtype ) {
            change = mesh_addpart(conf->particle[current].pos.x, conf->particle[current].pos.y, &(*mesh).data, (*mesh).dim);
        }
        i++;
        current = chain[i];
    }
    /* remove the chain's original positions (chorig holds the pre-move copies) */
    i = 0;
    current = chain[0];
    while ( (current >=0 ) && (change) ) {
        if ( conf->particle[current].type == sim->wl.wlmtype ) {
            change = mesh_removepart(chorig[i].pos.x, chorig[i].pos.y, &(*mesh).data, (*mesh).dim);
        }
        i++;
        current = chain[i];
    }
    if ( !change ) {
        /* fill the mesh with particles*/
        mesh_fill(mesh,npart,conf->particle, sim);
        return (long) (mesh_findholes(mesh) - sim->wl.minorder[wli]);
    }
    return sim->wl.currorder[wli];
}

/* filling the mesh */
void
mesh_fill(struct meshs *mesh, long npart, struct particles *particle, struct sim * sim)
{
    long i;
    int mesh_addpart(double posx, double posy, int **mesh, int dim[2]);

    /* zero the mesh, then stamp in every particle of the watched type */
    for ( i=0; i<((*mesh).dim[0] * (*mesh).dim[1]); i++) {
        (*mesh).data[i] = 0;
    }
    for (i=0; i<npart; i++) {
        /*calculate position of particle on mesh and add it to all where it belongs */
        if (particle[i].type == sim->wl.wlmtype)
            mesh_addpart(particle[i].pos.x,particle[i].pos.y, &(*mesh).data, (*mesh).dim);
    }
}

/* add particle on coordinates posx posy to mesh
   return 0 if it was placed on empty spot*/
/* Decrements the 3x3 neighbourhood of the particle's bin (occupied bins go
   negative); returns 1 only if every touched bin was previously free. */
int mesh_addpart(double posx, double posy, int **mesh, int dim[2])
{
    int i, square[9], onhole;
    double resid;
    void mesh_square(int , int , int [2], int (*)[9]);

    onhole = 1;
    mesh_square( (int) (INBOX(posx,resid) * dim[0]), (int) (INBOX(posy,resid) * dim[1]) , dim, &square);
    for(i=0;i<9;i++) {
        /* defensive range check — should never trigger */
        if ( (square[i] >= dim[0]*dim[1])||(square[i] <0) ) {
            printf ("Error: trying to write to %d\n",square[i]);
            printf ("%d %d and %d\n", (int) (INBOX(posx,resid) * dim[0]), (int) (INBOX(posy,resid) * dim[1]),i );
            fflush(stdout);
        }
        if ( ((*mesh)[ square[i] ]) >= 0 ) onhole = 0;
        (*mesh)[ square[i] ]--;
    }
    return onhole;
}

/* remove particle on coordinates posx posy from mesh
   and return 0 if there is a empty spot now*/
int mesh_removepart(double posx, double posy, int **mesh, int dim[2])
{
    int i, square[9];
    double resid;
    void mesh_square(int , int , int [2], int (*)[9]);

    mesh_square((int) (INBOX(posx,resid) * dim[0]), (int) (INBOX(posy,resid) * dim[1]) , dim, &square);
    for(i=0;i<9;i++) {
        //DEBUG  if (square[i] >= dim[0]*dim[1]) printf ("Error: trying to write to %d\n",square[i]);
        (*mesh)[ square[i] ]++;
        /* a bin back at zero means a free spot (re)appeared */
        if ( ((*mesh)[ square[i] ]) == 0 ) return 0;
    }
    return 1;
}

/* Fill square[0..8] with the flat indices of the 3x3 periodic neighbourhood
   of mesh cell (x,y). */
void mesh_square(int x, int y, int dim[2], int (*square)[9])
{
    int a,b;

    b=y;
    (*square)[0] = x + dim[0]*b;
    a = x-1;
    if ( a<0 ) a = dim[0]-1;
    (*square)[1] = a + dim[0]*b;
    a = x+1;
    if ( a==dim[0] ) a = 0;
    (*square)[2] = a + dim[0]*b;
    b = y-1;
    if ( b<0 ) b = dim[1]-1;
    (*square)[3]
= x + dim[0]*b;
    a = x-1;
    if ( a<0 ) a = dim[0]-1;
    (*square)[4] = a + dim[0]*b;
    a = x+1;
    if ( a==dim[0] ) a = 0;
    (*square)[5] = a + dim[0]*b;
    b = y+1;
    if ( b==dim[1] ) b = 0;
    (*square)[6] = x + dim[0]*b;
    a = x-1;
    if ( a<0 ) a = dim[0]-1;
    (*square)[7] = a + dim[0]*b;
    a = x+1;
    if ( a==dim[0] ) a = 0;
    (*square)[8] = a + dim[0]*b;
}

/* Fill neighbors[0..3] with the flat indices of the 4-connected periodic
   neighbours (left, right, down, up) of flat mesh index pos. */
void mesh_neighbors(int pos, int dim[2], int neighbors[4])
{
    int x,y,a;

    x = pos % dim[0];
    y = pos / dim[0];
    a = x-1;
    if ( a<0 ) a = dim[0]-1;
    neighbors[0] = a + dim[0]*y;
    a = x+1;
    if ( a==dim[0] ) a = 0;
    neighbors[1] = a + dim[0]*y;
    a = y-1;
    if ( a<0 ) a = dim[1]-1;
    neighbors[2] = x + dim[0]*a;
    a = y+1;
    if ( a==dim[1] ) a = 0;
    neighbors[3] = x + dim[0]*a;
}

/* returns the number of holes and a list of mesh points belonging to each of them */
/* Flood-fill over free (==0) cells with 4-connectivity; occupied cells
   (negative counts) are reset to 0 markers first. Returns the size of the
   largest free cluster ("hole"), not the number of holes. */
int mesh_findholes(struct meshs *mesh)
{
    int i,j, k, n, size, li, maxsize;
    int neighbors[4];
    void mesh_neighbors(int, int [2], int [4]);

    n=0;
    maxsize = 0;
    /* normalize: free cells 0, occupied cells left negative */
    for (i=0;i<((*mesh).dim[0] * (*mesh).dim[1]);i++) {
        (*mesh).tmp[i] = 0;
        if ( (*mesh).data[i] > 0 ) (*mesh).data[i] = 0;
    }
    i=0;
    // go through all mesh points
    while ( i < ((*mesh).dim[0] * (*mesh).dim[1]) ) {
        // test if mesh point is occupied
        if ( (*mesh).data[i] != 0 ) {
            i++;
        } else {
            // mesh point is free, create a new cluster
            n++;
            (*mesh).data[i] = n;
            // start new cluster, put mesh point as first element, and set list pointer on first element
            //DEBUG  if (n >= mesh.dim[0]*mesh.dim[1]) printf ("Error: trying to write to sizes position %d\n",n);
            size = 1;
            (*mesh).tmp[0] = i;
            li = 0;
            // go through all elements of the cluster
            while ( li < size ) {
                //go through all neighbors
                j = (*mesh).tmp[li];
                mesh_neighbors(j, (*mesh).dim, neighbors);
                for ( k=0; k<4; k++ ) {
                    // test if status is free and append it to the cluster
                    if ( (*mesh).data[ neighbors[k] ] == 0 ) {
                        (*mesh).data[ neighbors[k] ] = n;
                        // append mesh point as element in the list
                        (*mesh).tmp[size] = neighbors[k];
                        size++;
                    }
                    if ( (*mesh).data[ neighbors[k] ] > 0 && (*mesh).data[ neighbors[k] ]<n ) {
fprintf(stderr,"Error: Mesh cluster out of range, propably going infinite through pbc.");
                        fflush(stderr);
                    }
                }
                li++;
            }
            if (size > maxsize) maxsize = size;
        }
    }
    return maxsize;
}

/* (Re)build the mesh with cell size ~meshsize over the box cross-section,
   fill it with the current particles and return the largest-hole size. */
int mesh_init(struct meshs *mesh, double meshsize, long npart, struct conf * conf, struct sim * sim)
{
    // int i;
    int maxsize,length;
    void mesh_fill(struct meshs *, long , struct particles *, struct sim * sim);
    int mesh_findholes(struct meshs *);

    (*mesh).dim[0] = (int)(conf->box.x/meshsize);
    (*mesh).dim[1] = (int)(conf->box.y/meshsize);
    /* drop any previous allocation before resizing */
    if ( (*mesh).data != NULL ) free((*mesh).data);
    if ( (*mesh).tmp != NULL ) free((*mesh).tmp);
    length = (*mesh).dim[0] * (*mesh).dim[1];
    (*mesh).data = malloc( sizeof(int)* (length));
    (*mesh).tmp = malloc( sizeof(int)* (length+1));
    /* fill the mesh with particles*/
    mesh_fill(mesh, npart,conf->particle, sim);
    /* perfrom hole cluster algorithm */
    maxsize = mesh_findholes(mesh);
    /*DEBUG
    printf("maxsize: %d\n",maxsize);
    printf("mesh:\n");
    for (i=0;i<mesh.dim[0]*mesh.dim[1];i++) {
        printf("%d ",mesh.data[i]);
        if ( ((i+1) % mesh.dim[0]) == 0) printf("\n");
    }*/
    return maxsize;
}

/* Debug helper: dump the mesh contents and the current largest-hole size. */
void mesh_print (struct meshs *mesh)
{
    int i;
    int mesh_findholes(struct meshs *);

    printf("mesh:\n");
    for (i=0;i<(*mesh).dim[0] * (*mesh).dim[1];i++) {
        printf("%d ",(*mesh).data[i]);
        if ( ((i+1) % (*mesh).dim[0]) == 0) printf("\n");
    }
    printf("hole %d:\n", mesh_findholes(mesh) );
    printf("\n");
}

/* Deep-copy source mesh into target, reusing target's buffers when the
   dimensions match and reallocating otherwise. */
int mesh_cpy (struct meshs *target, struct meshs *source)
{
    if ( (*target).data != NULL) {
        if ( ((*target).dim[0] == (*source).dim[0]) && ((*target).dim[1] == (*source).dim[1]) ) {
            /* same geometry: plain copy, no reallocation */
            memcpy((*target).data,(*source).data, sizeof(int)* ((*target).dim[0] * (*target).dim[1]) );
            return 0;
        } else {
            free ((*target).data);
            /* grow the scratch buffer only when the source is larger */
            if ( (*source).dim[0] * (*source).dim[1] > (*target).dim[0] * (*target).dim[1] ) {
                if ((*target).tmp != NULL ) free ((*target).tmp);
                (*target).tmp = malloc( sizeof(int)* ((*source).dim[0] * (*source).dim[1] + 1));
            }
        }
    }
    (*target).dim[0] = (*source).dim[0];
    (*target).dim[1] = (*source).dim[1];
(*target).data = malloc( sizeof(int)* ((*target).dim[0] * (*target).dim[1])); if ((*target).tmp == NULL ) (*target).tmp = malloc( sizeof(int)* ((*source).dim[0] * (*source).dim[1] + 1)); memcpy((*target).data,(*source).data, sizeof(int)* ((*target).dim[0] * (*target).dim[1]) ); return 0; } int mesh_end(struct meshs *mesh) { /* free allocated memory */ if ( (*mesh).data!= NULL ) free((*mesh).data); if ( (*mesh).tmp!= NULL ) free((*mesh).tmp); return 0; } /*..............................................................................*/ /*........................RADIUS HOLE IN CENTER MEMBRANE ORDER PARAM............*/ /*return current bin of free radius*/ long radiushole_order(struct sim * sim) { long i; for (i=0;i<sim->wl.radiusholemax-3;i++){ if ((sim->wl.radiushole[i] >0 ) && (sim->wl.radiushole[i+1] >0 ) && (sim->wl.radiushole[i+2] >0 ) && (sim->wl.radiushole[i+3] >0 )) return i-1; } return -100; } /*return order of given radius */ long radiushole_position(double radius, struct sim * sim, int wli) { return (long) ceil( ( radius - sim->wl.minorder[wli]) / sim->wl.dorder[wli] ); } /* return change in order parameter when one particle moves*/ long radiusholeorder_moveone(struct vector *oldpos, struct conf *conf, struct sim * sim, long target,int wli, struct vector *position) { long nr,or; /* position in radiushole */ double rx,ry,z; BOOL oz,nz; long radiushole_position(double radius, struct sim * sim,int); long radiushole_order(struct sim *sim); double anint(double); void radiushole_print (long *radiushole, long length); if ( conf->particle[target].type != sim->wl.wlmtype ) return sim->wl.currorder[wli]; z=conf->particle[target].pos.z - position->z; /*if above position*/ if (z-anint(z) < 0) nz = FALSE; else nz=TRUE; z=oldpos->z - position->z; /*if above position*/ if (z-anint(z) < 0) oz = FALSE; else oz=TRUE; if ( !(nz) && !(oz) ) return sim->wl.currorder[wli]; rx = conf->box.x * (conf->particle[target].pos.x - anint(conf->particle[target].pos.x)); ry = conf->box.y 
* (conf->particle[target].pos.y - anint(conf->particle[target].pos.y)); nr = radiushole_position(sqrt(rx*rx+ry*ry),sim,wli); if (nr < 0) return -100; /*particle move over radius bins*/ if (nz) { sim->wl.radiushole[nr]++; } if (oz) { rx = conf->box.x * (oldpos->x - anint(oldpos->x)); ry = conf->box.y * (oldpos->y - anint(oldpos->y)); or = radiushole_position(sqrt(rx*rx+ry*ry),sim,wli); sim->wl.radiushole[or]--; if ( sim->wl.radiushole[or] < 0 ) { printf ("Error(single particle move): trying to make number of beads in radiuspore smaller than 0 at position %ld\n",or); radiushole_print(sim->wl.radiushole,sim->wl.radiusholemax); fflush(stdout); } if (sim->wl.radiushole[or] ==0) return radiushole_order(sim); } if ( (nz) && (sim->wl.radiushole[nr] ==1) ) { return radiushole_order(sim); } return sim->wl.currorder[wli]; } /* return change in order parameter when chain moves*/ long radiusholeorder_movechain(long chain[MAXN], struct conf * conf, struct sim * sim,struct particles chorig[MAXCHL],int wli, struct vector *position) { long i,current,nr; double rx,ry,z; BOOL change=FALSE; long radiushole_position(double radius, struct sim * sim,int); long radiushole_order(struct sim *sim); double anint(double); void radiushole_print (long *radiushole, long length); i = 0; rx=0; current = chain[0]; while (current >=0 ) { if ( conf->particle[current].type == sim->wl.wlmtype ) { z=conf->particle[current].pos.z - position->z; /*if above system CM*/ if (z-anint(z) > 0) { rx = conf->box.x * (conf->particle[current].pos.x - anint(conf->particle[current].pos.x)); ry = conf->box.y * (conf->particle[current].pos.y - anint(conf->particle[current].pos.y)); nr = radiushole_position(sqrt(rx*rx+ry*ry),sim,wli); if (nr < 0) return -100; sim->wl.radiushole[nr]++; if ( sim->wl.radiushole[nr] == 1 ) change = TRUE; } } i++; current = chain[i]; } i = 0; current = chain[0]; while (current >=0 ) { if ( conf->particle[current].type == sim->wl.wlmtype ) { z=chorig[i].pos.z - position->z; /*if above system 
CM*/ if (z-anint(z) > 0) { rx = conf->box.x * (chorig[i].pos.x - anint(chorig[i].pos.x)); ry = conf->box.y * (chorig[i].pos.y - anint(chorig[i].pos.y)); nr = radiushole_position(sqrt(rx*rx+ry*ry),sim,wli); sim->wl.radiushole[nr]--; if ( sim->wl.radiushole[nr] < 0 ) { printf ("Error (chainmove): trying to make number of beads in radiuspore smaller than 0 at position %ld\n",nr); radiushole_print(sim->wl.radiushole,sim->wl.radiusholemax); fflush(stdout); } if ( sim->wl.radiushole[nr] == 0 ) change = TRUE; } } i++; current = chain[i]; } if ( change ) { return radiushole_order(sim); } return sim->wl.currorder[wli]; } /* filling the radiushole above vec*/ long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli, struct vector *position) { long i,nr,radiusholemax; double rx,ry,z; long radiushole_position(double radius, struct sim * sim,int); long radiushole_order(struct sim *sim); double anint(double); radiusholemax = radiushole_position(sqrt(conf->box.x*conf->box.x+conf->box.y*conf->box.y),sim,wli); if ( radiusholemax > sim->wl.radiusholemax ) { if (sim->wl.radiushole != NULL) free(sim->wl.radiushole); sim->wl.radiushole = malloc( sizeof(long)* (radiusholemax)); sim->wl.radiusholemax = radiusholemax; } for (i=0;i<radiusholemax;i++) { sim->wl.radiushole[i] = 0; } for (i=0; i< topo->npart; i++) { /*calculate position of particle from z axis, and add it in array */ if ( conf->particle[i].type == sim->wl.wlmtype ) { z=conf->particle[i].pos.z - (*position).z; /*if above position*/ if (z-anint(z) > 0) { rx = conf->box.x * (conf->particle[i].pos.x - anint(conf->particle[i].pos.x)); ry = conf->box.y * (conf->particle[i].pos.y - anint(conf->particle[i].pos.y)); nr = radiushole_position(sqrt(rx*rx+ry*ry),sim,wli); if (nr < 0) return -100; sim->wl.radiushole[nr]++; } } } return radiushole_order(sim); } void radiushole_print (long *radiushole, long length) { long i; printf("radiushole:\n"); for (i=0;i<length;i++) { printf("%ld ",radiushole[i]); } 
printf("\n"); } int longarray_cpy (long **target, long **source, long targetlength, long sourcelength) { /*if ( (*target) != NULL) { if ( targetlength == sourcelength ) { memcpy((*target),(*source), sizeof(long)*(sourcelength)); return 0; } else { free(*target); } }*/ if ( (*target) != NULL) (*target) = (long*) realloc((*target), sizeof(long)*(sourcelength)); else (*target) = malloc( sizeof(long)*(sourcelength)); memcpy((*target),(*source), sizeof(long)*(sourcelength)); return 0; } /*..............................................................................*/ /* ............................... particles in contact ..................... */ /*return order for particles in contact */ long contparticles_order(struct sim * sim, int wli) { return (long) ceil( ( sim->wl.partincontact - sim->wl.minorder[wli]) / sim->wl.dorder[wli] ); } /*returns if particle is in contact*/ BOOL particleinncontact (struct vector *vec, struct conf *conf) { double x,y,z; double anint(double); x = vec->x - conf->particle[0].pos.x; y = vec->y - conf->particle[0].pos.y; z = vec->z - conf->particle[0].pos.z; x = conf->box.x * (x - anint(x)); y = conf->box.y * (y - anint(y)); z = conf->box.z * (z - anint(z)); if ( x*x + y*y + z*z < WL_CONTACTS) { return TRUE; } else { return FALSE; } } /* return change in number of particles in contact when one particle moves*/ long contparticles_moveone(struct vector *oldpos, struct conf *conf, struct sim * sim, long target,int wli) { long contparticles_order(struct sim * sim, int wli); BOOL particleinncontact (struct vector *vec, struct conf *conf); if ( conf->particle[target].type != sim->wl.wlmtype ) return sim->wl.currorder[wli]; if ( particleinncontact (&(conf->particle[target].pos),conf) ) sim->wl.partincontact++; if ( particleinncontact (oldpos,conf) ) sim->wl.partincontact--; return contparticles_order(sim,wli); } /* return change in order parameter when chain moves*/ long contparticles_movechain(long chain[MAXN], struct conf * conf, struct sim * 
sim,struct particles chorig[MAXCHL],int wli) { long i,current; long contparticles_order(struct sim * sim, int wli); BOOL particleinncontact (struct vector *vec, struct conf *conf); i = 0; current = chain[0]; while (current >=0 ) { if ( conf->particle[current].type == sim->wl.wlmtype ) { if ( particleinncontact (&(conf->particle[current].pos),conf) ) sim->wl.partincontact++; } i++; current = chain[i]; } i = 0; current = chain[0]; while (current >=0 ) { if ( conf->particle[current].type == sim->wl.wlmtype ) { if ( particleinncontact (&(chorig[i].pos),conf) ) sim->wl.partincontact--; } i++; current = chain[i]; } return contparticles_order(sim,wli); } /* filling all particles in the contact */ long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli) { long i; long contparticles_order(struct sim * sim, int wli); BOOL particleinncontact (struct vector *vec, struct conf *conf); sim->wl.partincontact = 0; for (i=1; i< topo->npart; i++) { /*calculate position of particle and add it if in contact */ if ( conf->particle[i].type == sim->wl.wlmtype ) { if ( particleinncontact (&(conf->particle[i].pos),conf) ) sim->wl.partincontact++; } } return contparticles_order(sim,wli); } /*..............................................................................*/ /*........................GEOMETRIC STUFF.......................................*/ /*..............................................................................*/ /*..............................................................................*/ /* Find closest distance between line segments and return its vector gets orientations and lengths of line segments and the vector connecting their center os masses (from vec1 to vec2) */ // Copyright 2001, softSurfer (www.softsurfer.com) // This code may be freely used and modified for any purpose // providing that this copyright notice is included with it. 
// SoftSurfer makes no warranty for this code, and cannot be held
// liable for any real or imagined damage resulting from its use.
// Users of this code must verify correctness for their application.

/* Closest-approach vector between two line segments (softSurfer
   dist3D_Segment_to_Segment).  dir1/dir2 are the segment axes, halfl1/halfl2
   their half-lengths, r_cm the vector between the segment centres; the
   returned vector is S1(sc) - S2(tc), i.e. it points from the closest point
   on segment 2 to the closest point on segment 1. */
struct vector mindist_segments(struct vector dir1, double halfl1, struct vector dir2, double halfl2, struct vector r_cm) {
    struct vector u,v,w,vec;
    double a,b,c,d,e,D,sc,sN,sD,tc,tN,tD;
    struct vector vec_scale(struct vector, double);

    u = vec_scale(dir1,2.0*halfl1); //S1.P1 - S1.P0;
    v = vec_scale(dir2,2.0*halfl2); //S2.P1 - S2.P0;
    w.x = dir2.x*halfl2 - dir1.x*halfl1 - r_cm.x;
    w.y = dir2.y*halfl2 - dir1.y*halfl1 - r_cm.y;
    w.z = dir2.z*halfl2 - dir1.z*halfl1 - r_cm.z; //S1.P0 - S2.P0;
    a = DOT(u,u); // always >= 0
    b = DOT(u,v);
    c = DOT(v,v); // always >= 0
    d = DOT(u,w);
    e = DOT(v,w);
    D = a*c - b*b; // always >= 0
    sc = D; sN = D; sD = D; // sc = sN / sD, default sD = D >= 0
    tc = D; tN = D; tD = D; // tc = tN / tD, default tD = D >= 0

    // compute the line parameters of the two closest points
    if (D < 0.00000001) { // the lines are almost parallel
        sN = 0.0; // force using point P0 on segment S1
        sD = 1.0; // to prevent possible division by 0.0 later
        tN = e;
        tD = c;
    }
    else { // get the closest points on the infinite lines
        sN = (b*e - c*d);
        tN = (a*e - b*d);
        if (sN < 0.0) { // sc < 0 => the s=0 edge is visible
            sN = 0.0;
            tN = e;
            tD = c;
        }
        else if (sN > sD) { // sc > 1 => the s=1 edge is visible
            sN = sD;
            tN = e + b;
            tD = c;
        }
    }

    if (tN < 0.0) { // tc < 0 => the t=0 edge is visible
        tN = 0.0;
        // recompute sc for this edge
        if (-d < 0.0) sN = 0.0;
        else if (-d > a) sN = sD;
        else { sN = -d; sD = a; }
    }
    else if (tN > tD) { // tc > 1 => the t=1 edge is visible
        tN = tD;
        // recompute sc for this edge
        if ((-d + b) < 0.0) sN = 0;
        else if ((-d + b) > a) sN = sD;
        else { sN = (-d + b); sD = a; }
    }
    // finally do the division to get sc and tc
    if (fabs(sN) < 0.00000001) sc = 0.0 ;
    else sc = sN / sD;
    if (fabs(tN) < 0.00000001) tc = 0.0 ;
    else tc = tN / tD;

    // get the difference of the two closest points
    //Vector = w + (sc * u) - (tc * v); // = S1(sc) - S2(tc)
    vec.x = u.x*sc + w.x - v.x*tc;
    vec.y = u.y*sc + w.y - v.y*tc;
    vec.z = u.z*sc + w.z - v.z*tc;

    return vec;
}

/*..............................................................................*/

/* Find closest distance between line segment and point and return it as vector
(from point to closest segment point)
Function gets orientation and length of line segments and the vector connecting
their center os masses (from segment to point)
*/
struct vector mindist_segmentpoint(struct vector dir1, double length, struct vector r_cm) {
    struct vector vec;
    double c,d,halfl;

    halfl=length*0.5;
    c = DOT(dir1,r_cm);
    /* clamp the axis coordinate of the foot point to the segment */
    if (c >= halfl) d = halfl;
    else {
        if (c > -halfl) d = c;
        else d = -halfl;
    }
    vec.x = - r_cm.x + dir1.x * d;
    vec.y = - r_cm.y + dir1.y * d;
    vec.z = - r_cm.z + dir1.z * d;

    return vec;
}

/*..............................................................................*/

/* Determines whether two particles overlap. Returns 1 if there is an overlap, 0 if not.
*/ int overlap(struct particles part1, struct particles part2, struct vector box, struct ia_param ia_params[MAXT][MAXT]) { double b, c, d, e, f; /* Coefficients in distance quadratic */ double boundary; /* Half length of central boundary zone of quadratic */ double det; double halfl; /* Half length of cylinder */ double s0, t0; /* det times location of min separation of infinite lines */ double ss, tt; /* Location of min separation of line segments */ struct vector r_cm; /* Vector between centres of mass */ double dist; /* Distance between particles*/ struct vector distvec; /* Distance vector between particles*/ double linemin(double, double); struct vector image(struct vector, struct vector, struct vector); r_cm = image(part1.pos, part2.pos, box); if ((part1.type >= SP) && (part2.type >= SP)) { /*we have two spheres - most common, do nothing*/ dist=sqrt(DOT(r_cm,r_cm)); } else { if ((ia_params[part1.type][part2.type].geotype[0] < SP) && (ia_params[part1.type][part2.type].geotype[1] < SP)) { /*we have two spherocylinders*/ /*finding closes contact between them*/ b = -DOT(part1.dir, part2.dir); d = DOT(part1.dir, r_cm); e = -DOT(part2.dir, r_cm); f = DOT(r_cm, r_cm); det = 1.0 - b*b; //halfl = length / 2.0; // Just take the mean halfl = ia_params[part1.type][part2.type].half_len[0] = ia_params[part1.type][part2.type].half_len[1]; halfl /= 2; boundary = det * halfl; /* Location of smallest separation of the infinite lines */ s0 = b*e - d; t0 = b*d - e; /* Location of smallest separation of line segments */ if (s0 >= boundary) { if (t0 >= boundary) { /* Region 2 */ if ( d + halfl + halfl*b < 0.0 ) { ss = halfl; tt = linemin( -ss*b - e, halfl ); } else { tt = halfl; ss = linemin( -tt*b - d, halfl ); } } else if (t0 >= -boundary) { /* Region 1 */ ss = halfl; tt = linemin( -ss*b - e, halfl ); } else { /* Region 8 */ if ( d + halfl - halfl*b < 0.0 ) { ss = halfl; tt = linemin( -ss*b - e, halfl ); } else { tt = -halfl; ss = linemin( -tt*b - d, halfl ); } } } else if (s0 >= 
-boundary) { if (t0 >= boundary) { /* Region 3 */ tt = halfl; ss = linemin( -tt*b - d, halfl ); } else if (t0 >= -boundary) { /* Region 0 */ ss = s0/det; tt = t0/det; } else { /* Region 7 */ tt = -halfl; ss = linemin( -tt*b - d, halfl ); } } else { if (t0 >= boundary) { /* Region 4 */ if ( d - halfl + halfl*b > 0.0 ) { ss = -halfl; tt = linemin( -ss*b - e, halfl ); } else { tt = halfl; ss = linemin( -tt*b - d, halfl ); } } else if (t0 >= -boundary) { /* Region 5 */ ss = -halfl; tt = linemin( -ss*b - e, halfl ); } else { /* Region 6 */ if ( d - halfl - halfl*b > 0.0 ) { ss = -halfl; tt = linemin( -ss*b - e, halfl ); } else { tt = -halfl; ss = linemin( -tt*b - d, halfl ); } } } /*ss snd tt are Location of min separation of line segments */ dist=sqrt(f + ss*ss + tt*tt + 2.0*(ss*d + tt*e + ss*tt*b)); } else { if (ia_params[part1.type][part2.type].geotype[0] < SP) { /*We have one spherocylinder -it is first one*/ //halfl=length/2;/*finding closest vector from sphyrocylinder to sphere*/ halfl=ia_params[part1.type][part2.type].half_len[0];/*finding closest vector from sphyrocylinder to sphere*/ c = DOT(part1.dir,r_cm); if (c >= halfl) d = halfl; else { if (c > -halfl) d = c; else d = -halfl; } distvec.x = - r_cm.x + part1.dir.x * d; distvec.y = - r_cm.y + part1.dir.y * d; distvec.z = - r_cm.z + part1.dir.z * d; dist=sqrt(DOT(distvec,distvec)); } else { /*lst option first one is sphere second one spherocylinder*/ //halfl=length/2; /*finding closest vector from sphyrocylinder to sphere*/ halfl=ia_params[part1.type][part2.type].half_len[1];/*finding closest vector from sphyrocylinder to sphere*/ c = DOT(part2.dir,r_cm); if (c >= halfl) d = halfl; else { if (c > -halfl) d = c; else d = -halfl; } distvec.x = r_cm.x - part2.dir.x * d; distvec.y = r_cm.y - part2.dir.y * d; distvec.z = r_cm.z - part2.dir.z * d; dist=sqrt(DOT(distvec,distvec)); } } } /* Overlap exists if smallest separation is less than diameter of cylinder */ if (dist < ia_params[part1.type][part2.type].sigma*0.5 
) { return 1; } else { return 0; } } /*..............................................................................*/ double linemin(double criterion, double halfl) { if (criterion >= halfl) { return halfl; } else if (criterion >= -halfl) { return criterion; } else { return -halfl; } } /*..............................................................................*/ /*........................SOME USEFUL MATH......................................*/ /*..............................................................................*/ /* ran2 from Numerical Recipes. */ #define IM1 2147483563 #define IM2 2147483399 #define AM (1.0/IM1) #define IMM1 (IM1-1) #define IA1 40014 #define IA2 40692 #define IQ1 53668 #define IQ2 52774 #define IR1 12211 #define IR2 3791 #define NTAB 32 #define NDIV (1+IMM1/NTAB) #define EPS 1.2e-7 #define RNMX (1.0-EPS) double ran2(long *idum) { int j; long k; static long idum2=123456789; static long iy=0; static long iv[NTAB]; double temp; if (*idum <= 0) { if (-(*idum) < 1) *idum=1; else *idum = -(*idum); idum2=(*idum); for (j=NTAB+7;j>=0;j--) { k=(*idum)/IQ1; *idum=IA1*(*idum-k*IQ1)-k*IR1; if (*idum < 0) *idum += IM1; if (j < NTAB) iv[j] = *idum; } iy=iv[0]; } k=(*idum)/IQ1; *idum=IA1*(*idum-k*IQ1)-k*IR1; if (*idum < 0) *idum += IM1; k=idum2/IQ2; idum2=IA2*(idum2-k*IQ2)-k*IR2; if (idum2 < 0) idum2 += IM2; j=iy/NDIV; iy=iv[j]-idum2; iv[j] = *idum; if (iy < 1) iy += IMM1; if ((temp=AM*iy) > RNMX) return RNMX; else { return temp; } } #undef IM1 #undef IM2 #undef AM #undef IMM1 #undef IA1 #undef IA2 #undef IQ1 #undef IQ2 #undef IR1 #undef IR2 #undef NTAB #undef NDIV #undef EPS #undef RNMX /*..............................................................................*/ /* From Numerical Recipes. Simplified to deal specifically with 3*3 matrices (stored as elements [1...3][1...3] or a 4*4 array). 
*/
/* Householder reduction of a real symmetric 3x3 matrix (in a[1..3][1..3]) to
   tridiagonal form (Numerical Recipes tred2, eigenvector accumulation
   stripped out).  On output d holds the diagonal and e the off-diagonal
   elements; a is destroyed. */
void tred2(double a[4][4], double d[4], double e[4]) {
    int l, k, j, i;
    double scale, hh, h, g, f;

    for (i=3; i>=2; i--) {
        l=i-1;
        h=scale=0.0;
        if (l > 1) {
            for (k=1;k<=l;k++) scale += fabs(a[i][k]);
            if (scale == 0.0) e[i]=a[i][l];
            else {
                for (k=1;k<=l;k++) {
                    a[i][k] /= scale;
                    h += a[i][k]*a[i][k];
                }
                f=a[i][l];
                g=(f >= 0.0 ? -sqrt(h) : sqrt(h));
                e[i]=scale*g;
                h -= f*g;
                a[i][l]=f-g;
                f=0.0;
                for (j=1;j<=l;j++) {
                    /* a[j][i]=a[i][j]/h; */
                    g=0.0;
                    for (k=1;k<=j;k++) g += a[j][k]*a[i][k];
                    for (k=j+1;k<=l;k++) g += a[k][j]*a[i][k];
                    e[j]=g/h;
                    f += e[j]*a[i][j];
                }
                hh=f/(h+h);
                for (j=1;j<=l;j++) {
                    f=a[i][j];
                    e[j]=g=e[j]-hh*f;
                    for (k=1;k<=j;k++) a[j][k] -= (f*e[k]+g*a[i][k]);
                }
            }
        } else e[i]=a[i][l];
        d[i]=h;
    }
    /* d[1]=0.0; */
    e[1]=0.0;
    for (i=1; i<=3; i++) {
        /* eigenvector accumulation disabled:
        l=i-1; if (d[i]) { for (j=1;j<=l;j++) { g=0.0; for (k=1;k<=l;k++) g += a[i][k]*a[k][j]; for (k=1;k<=l;k++) a[k][j] -= g*a[k][i]; } } */
        d[i]=a[i][i];
        /* a[i][i]=1.0; for (j=1;j<=l;j++) a[j][i]=a[i][j]=0.0; */
    }
}

/*..............................................................................*/
/* From Numerical Recipes. Simplified to deal specifically with 3*3 matrices (stored as elements [1...3][1...3] or a 4*4 array). */

#define NRANSI
#define SIGN(a,b) ((b) >= 0.0 ? fabs(a) : -fabs(a))

/* QL algorithm with implicit shifts (Numerical Recipes tqli, no eigenvector
   accumulation): finds the eigenvalues of the tridiagonal matrix produced by
   tred2 above.  d holds the diagonal on input and the eigenvalues on output;
   e holds the subdiagonal and is destroyed.  Exits the program after 30
   failed iterations. */
void tqli(double d[4], double e[4]) {
    double pythag(double a, double b);
    int m, l, iter, i; /* int k; */
    double s, r, p, g, f, dd, c, b;

    for (i=2; i<=3; i++) e[i-1] = e[i];
    e[3] = 0.0;
    for (l=1; l<=3; l++) {
        iter = 0;
        do {
            /* look for a single small subdiagonal element to split the matrix */
            for (m=l; m<=3-1; m++) {
                dd = fabs(d[m]) + fabs(d[m+1]);
                if ((double)(fabs(e[m])+dd) == dd) break;
            }
            if (m != l) {
                if (iter++ == 30) {
                    fprintf(stderr, "Too many iterations in tqli\n");
                    exit (2);
                }
                g = (d[l+1] - d[l]) / (2.0*e[l]); /* form shift */
                r = pythag(g, 1.0);
                g = d[m] - d[l] + e[l] / (g + SIGN(r,g));
                s = c = 1.0;
                p = 0.0;
                for (i=m-1; i>=l; i--) {
                    /* a plane rotation (Givens) followed by implicit QL sweep */
                    f = s * e[i];
                    b = c * e[i];
                    e[i+1] = (r=pythag(f,g));
                    if (r == 0.0) { /* recover from underflow */
                        d[i+1] -= p;
                        e[m] = 0.0;
                        break;
                    }
                    s = f/r;
                    c = g/r;
                    g = d[i+1] - p;
                    r = (d[i] - g)*s + 2.0*c*b;
                    d[i+1] = g+(p=s*r);
                    g = c*r - b;
                    /* eigenvector accumulation disabled:
                    for (k=1; k<=3; k++) { f = z[k][i+1]; z[k][i+1] = s*z[k][i]+c*f; z[k][i] = c*z[k][i]i - s*f; } */
                }
                if (r == 0.0 && i >= l) continue;
                d[l] -= p;
                e[l] = g;
                e[m] = 0.0;
            }
        } while (m != l);
    }
}

#undef NRANSI

/*..............................................................................*/
/* From Numerical Recipes. Used by tqli. */

#define NRANSI
static double sqrarg;
#define SQR(a) ((sqrarg=(a)) == 0.0 ? 0.0 : sqrarg*sqrarg)

/* sqrt(a^2 + b^2) without destructive under/overflow. */
double pythag(double a, double b) {
    double absa, absb;
    absa = fabs(a);
    absb = fabs(b);
    if (absa > absb) return absa*sqrt(1.0+SQR(absb/absa));
    else return (absb == 0.0 ? 0.0 : absb*sqrt(1.0+SQR(absa/absb)));
}

#undef NRANSI

/*..............................................................................*/

/* Normalise a vector to have unit length.  For speed during heavy use, it is
   not checked that the supplied vector has non-zero length (a zero vector is
   left unchanged). */
void normalise(struct vector *u) {
    double tot;

    tot = sqrt( DOT(*u,*u) );
    if (tot !=0.0) {
        tot=1/tot;
        (*u).x *= tot;
        (*u).y *= tot;
        (*u).z *= tot;
    }
}

/* Returns the vector pointing from the centre of mass of particle 2 to the centre of mass of the closest image of particle 1.
*/ struct vector image(struct vector r1, struct vector r2, struct vector box) { struct vector r12; double anint(double); r12.x = r1.x - r2.x; r12.y = r1.y - r2.y; r12.z = r1.z - r2.z; r12.x = box.x * (r12.x - anint(r12.x)); r12.y = box.y * (r12.y - anint(r12.y)); r12.z = box.z * (r12.z - anint(r12.z)); return r12; } /* Returns the nearest integer to its argument as a double precision number. e.g. anint(-0.49) = 0.0 and anint(-0.51) = -1.0. Equivalent to the Fortran intrinsic ANINT. */ double anint(double arg) { if (arg < 0) { return (double)( (long)(arg-0.5) ); } else { return (double)( (long)(arg+0.5) ); } } /*..............................................................................*/ /* Returns an evenly distributed random unit vector of unit length. See Allen & Tildesley p349 or Frenkel & Smit p410. RANDOM VECTOR ON UNIT SPHERE */ struct vector ranvec(void) { double a, b, xi1, xi2; struct vector unit; double ran2(long *); do { xi1 = 1.0 - 2.0*ran2(&seed); xi2 = 1.0 - 2.0*ran2(&seed); a = xi1*xi1 + xi2*xi2; } while (a > 1.0); b = 2.0 * sqrt(1.0 - a); unit.x = xi1 * b; unit.y = xi2 * b; unit.z = 1.0 - 2.0*a; return unit; } /** * returns a point randomly and evenly distributed inside of a unit sphere */ struct vector ranvecsph(void) { struct vector ranvec; double ran2(long *); do{ ranvec.x = 2 * ran2(&seed) - 1.0; ranvec.y = 2 * ran2(&seed) - 1.0; ranvec.z = 2 * ran2(&seed) - 1.0; } while(ranvec.x*ranvec.x + ranvec.y*ranvec.y + ranvec.z*ranvec.z >= 1); //printf("%lf\t%lf\t%lf\n", ranvec.x,ranvec.y,ranvec.z); return ranvec; } /**** some useful math *******/ struct vector vec_create(double x, double y, double z) { struct vector newvec; newvec.x=x; newvec.y=y; newvec.z=z; return newvec; } struct vector vec_createarr(double a[3]) { struct vector newvec; newvec.x=a[0]; newvec.y=a[1]; newvec.z=a[2]; return newvec; } double vec_dotproduct(struct vector A,struct vector B) { double dp; dp = A.x*B.x + A.y*B.y + A.z*B.z; return dp; } /* vector projection of vector A to 
direction of B*/ struct vector vec_project(struct vector* A,struct vector* B) { double dp; struct vector pr; dp = A->x*B->x + A->y*B->y + A->z*B->z; pr.x=B->x*dp; pr.y=B->y*dp; pr.z=B->z*dp; return pr; } void ortogonalise(struct vector *A, struct vector B) { double dp; double vec_dotproduct(struct vector A,struct vector B); dp=vec_dotproduct(*A,B); (*A).x -= B.x * dp; (*A).y -= B.y * dp; (*A).z -= B.z * dp; } /* vector projection of vector A perpendicular to direction of B*/ struct vector vec_perpproject(struct vector *A,struct vector *B) { struct vector pp; double dp; struct vector vec_project(struct vector *, struct vector*); dp=DOT((*A),(*B)); pp.x = A->x - B->x*dp; pp.y = A->y - B->y*dp; pp.z = A->z - B->z*dp; // fprintf (stderr, "pp x: %.8f y: %.8f z: %.8f \n",pp.x,pp.y,pp.z); return pp; } /* returns a vector perpendicular to A nothing special about the vector except that it's one of the perpendicular options and is normalized */ struct vector vec_perp(struct vector A) { double ratio,x,y; struct vector somevector; struct vector vec_create(double, double, double); struct vector vec_normalize(struct vector); void normalise(struct vector *); struct vector vec_crossproduct(struct vector, struct vector); x=A.x; y=A.y; if (x == 0) x=1; else { if (y == 0) y=1; else { ratio=y/x; y=x*ratio*2; } } somevector= vec_create(x, y, A.z); normalise(&somevector); return vec_crossproduct(A,somevector); } /* Perform the multiplication of a matrix A and a vector B where A is the first argument and B is the second argument. 
The routine will return AxB*/ struct vector matrix_vec_multiply(double A[3][3],struct vector B) { int i; double vecarr[3]; struct vector AB,RA; struct vector vec_createarr(double[3]); double vec_dotproduct(struct vector,struct vector); for (i=0;i<3;i++) { /* index the row vector from A*/ RA=vec_createarr(A[i]); /* Now find the dot product of this row with B*/ vecarr[i]=vec_dotproduct(RA,B); } AB=vec_createarr(vecarr); return AB; } /* Distance between two vectors*/ double vec_distance(struct vector vec1,struct vector vec2) { double sum; sum= (vec1.x-vec2.x)*(vec1.x-vec2.x)+(vec1.y-vec2.y)*(vec1.y-vec2.y)+(vec1.z-vec2.z)*(vec1.z-vec2.z); return pow(sum,0.5); } /* Vector size */ double vec_size(struct vector vec) { double size; size=sqrt(vec.x*vec.x+ vec.y*vec.y+ vec.z*vec.z); return size; } /* Normalize a vector*/ struct vector vec_normalize(struct vector vec) { double mag; struct vector newvec; double vec_size(struct vector); mag= vec_size (vec); mag=1/mag; newvec.x=vec.x*mag; newvec.y=vec.y*mag; newvec.z=vec.z*mag; return newvec; } /* Scale a vector */ struct vector vec_scale(struct vector vec, double scale) { vec.x=vec.x*scale; vec.y=vec.y*scale; vec.z=vec.z*scale; return vec; } /* cross_product*/ struct vector vec_crossproduct(struct vector A,struct vector B) { struct vector cp; cp.x=( A.y*B.z - A.z*B.y); cp.y=( -A.x*B.z + A.z*B.x); cp.z=( A.x*B.y - A.y*B.x); return cp; } /* addition of vectors*/ inline struct vector vec_sum(struct vector A,struct vector B) { struct vector C; C.x=(A.x + B.x); C.y=(A.y + B.y); C.z=(A.z + B.z); return C; } /* subtraction of vectors*/ inline struct vector vec_sub(struct vector A,struct vector B) { struct vector C; C.x=(A.x - B.x); C.y=(A.y - B.y); C.z=(A.z - B.z); return C; } /* asign vlues of vector A by values in vector B*/ inline void vec_asign(struct vector *A, struct vector B) { (*A).x=B.x; (*A).y=B.y; (*A).z=B.z; } /* generate random unit vector*/ struct vector vec_random(void) { struct vector newvec; struct vector 
ranvec(void); newvec=ranvec(); return newvec; } /*generate random unit quaternion*/ struct quat quat_random(void) { double cosv, sinv; struct quat newquat; struct vector newaxis; struct vector ranvec(void); /* generate quaternion for rotation*/ newaxis = ranvec(); /*random axes for rotation*/ cosv = cos(PIH * ran2(&seed) ); if (ran2(&seed) <0.5) sinv = sqrt(1.0 - cosv*cosv); else sinv = -sqrt(1.0 - cosv*cosv); newquat.w=cosv; newquat.x=newaxis.x*sinv; newquat.y=newaxis.y*sinv; newquat.z=newaxis.z*sinv; return newquat; } /* Create quaternion for rotation around vector "vec" of angle in degrees "angle" function need cos of half angle and its sin*/ struct quat quat_create(struct vector vec, double vc, double vs) { struct quat newquat; newquat.w=vc; newquat.x=vec.x*vs; newquat.y=vec.y*vs; newquat.z=vec.z*vs; return newquat; } /*rotate vector with quaternion*/ void vec_rotate(struct vector *vec, struct quat quat) { double t2,t3,t4,t5,t6,t7,t8,t9,t10,newx,newy,newz; /* t1 = quat.w * quat.w; */ t2 = quat.w * quat.x; t3 = quat.w * quat.y; t4 = quat.w * quat.z; t5 = -quat.x * quat.x; t6 = quat.x * quat.y; t7 = quat.x * quat.z; t8 = -quat.y * quat.y; t9 = quat.y * quat.z; t10 = -quat.z * quat.z; newx = 2.0 * ( (t8+t10)*(*vec).x + (t6-t4)*(*vec).y + (t3+t7)*(*vec).z ) + (*vec).x; newy = 2.0 * ( (t4+t6)*(*vec).x + (t5+t10)*(*vec).y + (t9-t2)*(*vec).z ) + (*vec).y; newz = 2.0 * ( (t7-t3)*(*vec).x + (t2+t9)*(*vec).y + (t5+t8)*(*vec).z ) + (*vec).z; (*vec).x = newx; (*vec).y = newy; (*vec).z = newz; } /* rotate spherocylinder by quaternion of random axis and angle smaller than maxcos(cosine of angle half), we do everything on site for speed */ void psc_rotate(struct particles *psc, double max_angle,int geotype) { double vc, vs, t2, t3, t4, t5, t6, t7, t8, t9, t10; double d1, d2, d3, d4, d5, d6, d7, d8, d9 , newx, newy, newz; int k,m; struct quat newquat; struct vector newaxis; struct vector ranvec(void); /* generate quaternion for rotation*/ newaxis = ranvec(); /*random axes for 
rotation*/ // maxcos = cos(maxorient/2/180*PI); // vc = maxcos + ran2(&seed)*(1-maxcos); /*cos of angle must be bigger than maxcos and smaller than one*/ vc = cos(max_angle * ran2(&seed) ); if (ran2(&seed) <0.5) vs = sqrt(1.0 - vc*vc); else vs = -sqrt(1.0 - vc*vc); /*randomly choose orientation of direction of rotation clockwise or counterclockwise*/ newquat.w=vc; newquat.x=newaxis.x*vs; newquat.y=newaxis.y*vs; newquat.z=newaxis.z*vs; /* do quaternion rotation*/ t2 = newquat.w * newquat.x; t3 = newquat.w * newquat.y; t4 = newquat.w * newquat.z; t5 = -newquat.x * newquat.x; t6 = newquat.x * newquat.y; t7 = newquat.x * newquat.z; t8 = -newquat.y * newquat.y; t9 = newquat.y * newquat.z; t10 = -newquat.z * newquat.z; d1 = t8 + t10; d2 = t6 - t4; d3 = t3 + t7; d4 = t4 + t6; d5 = t5 + t10; d6 = t9 - t2; d7 = t7 - t3; d8 = t2 + t9; d9 = t5 + t8; /*rotate spherocylinder direction vector*/ newx = 2.0 * ( d1*psc->dir.x + d2*psc->dir.y + d3*psc->dir.z ) + psc->dir.x; newy = 2.0 * ( d4*psc->dir.x + d5*psc->dir.y + d6*psc->dir.z ) + psc->dir.y; newz = 2.0 * ( d7*psc->dir.x + d8*psc->dir.y + d9*psc->dir.z ) + psc->dir.z; psc->dir.x = newx; psc->dir.y = newy; psc->dir.z = newz; m=1; if ( (geotype != SCN) && (geotype != SCA) ) { if ( (geotype == TPSC) || (geotype == TCPSC) || (geotype == TCHPSC) || (geotype == TCHCPSC) ) m=2; for (k=0;k<m;k++) { /*rotate patch direction vector*/ newx = 2.0 * ( d1*psc->patchdir[k].x + d2*psc->patchdir[k].y + d3*psc->patchdir[k].z ) + psc->patchdir[k].x; newy = 2.0 * ( d4*psc->patchdir[k].x + d5*psc->patchdir[k].y + d6*psc->patchdir[k].z ) + psc->patchdir[k].y; newz = 2.0 * ( d7*psc->patchdir[k].x + d8*psc->patchdir[k].y + d9*psc->patchdir[k].z ) + psc->patchdir[k].z; psc->patchdir[k].x = newx; psc->patchdir[k].y = newy; psc->patchdir[k].z = newz; /*rotate patch sides vectors*/ newx = 2.0 * ( d1*psc->patchsides[0+2*k].x + d2*psc->patchsides[0+2*k].y + d3*psc->patchsides[0+2*k].z ) + psc->patchsides[0+2*k].x; newy = 2.0 * ( 
d4*psc->patchsides[0+2*k].x + d5*psc->patchsides[0+2*k].y + d6*psc->patchsides[0+2*k].z ) + psc->patchsides[0+2*k].y; newz = 2.0 * ( d7*psc->patchsides[0+2*k].x + d8*psc->patchsides[0+2*k].y + d9*psc->patchsides[0+2*k].z ) + psc->patchsides[0+2*k].z; psc->patchsides[0+2*k].x = newx; psc->patchsides[0+2*k].y = newy; psc->patchsides[0+2*k].z = newz; newx = 2.0 * ( d1*psc->patchsides[1+2*k].x + d2*psc->patchsides[1+2*k].y + d3*psc->patchsides[1+2*k].z ) + psc->patchsides[1+2*k].x; newy = 2.0 * ( d4*psc->patchsides[1+2*k].x + d5*psc->patchsides[1+2*k].y + d6*psc->patchsides[1+2*k].z ) + psc->patchsides[1+2*k].y; newz = 2.0 * ( d7*psc->patchsides[1+2*k].x + d8*psc->patchsides[1+2*k].y + d9*psc->patchsides[1+2*k].z ) + psc->patchsides[1+2*k].z; psc->patchsides[1+2*k].x = newx; psc->patchsides[1+2*k].y = newy; psc->patchsides[1+2*k].z = newz; } } m=1; if ( (geotype == CHPSC) || (geotype == CHCPSC) || (geotype == TCHPSC) || (geotype == TCHCPSC) ) { if ( (geotype == TCHPSC) || (geotype == TCHCPSC) ) m=2; for (k=0;k<m;k++) { /*rotate chiral direction vector*/ newx = 2.0 * ( d1*psc->chdir[k].x + d2*psc->chdir[k].y + d3*psc->chdir[k].z ) + psc->chdir[k].x; newy = 2.0 * ( d4*psc->chdir[k].x + d5*psc->chdir[k].y + d6*psc->chdir[k].z ) + psc->chdir[k].y; newz = 2.0 * ( d7*psc->chdir[k].x + d8*psc->chdir[k].y + d9*psc->chdir[k].z ) + psc->chdir[k].z; psc->chdir[k].x = newx; psc->chdir[k].y = newy; psc->chdir[k].z = newz; } } } /*returns a position of center of mass of system*/ void masscenter(long npart, struct ia_param ia_params[MAXT][MAXT], struct conf * conf) { long i; double anint(double); conf->syscm.x = 0; conf->syscm.y = 0; conf->syscm.z = 0; for (i=0; i<npart; i++) { /*using periodic boundary conditions*/ conf->syscm.x += (conf->particle[i].pos.x - anint(conf->particle[i].pos.x) ) * ia_params[conf->particle[i].type][conf->particle[i].type].volume; conf->syscm.y += (conf->particle[i].pos.y - anint(conf->particle[i].pos.y) ) * 
ia_params[conf->particle[i].type][conf->particle[i].type].volume; conf->syscm.z += (conf->particle[i].pos.z - anint(conf->particle[i].pos.z) ) * ia_params[conf->particle[i].type][conf->particle[i].type].volume; } conf->syscm.x /= conf->sysvolume; conf->syscm.y /= conf->sysvolume; conf->syscm.z /= conf->sysvolume; return; } /* rotate cluster of particles by quaternion of random axis and angle smaller than maxcos(cosine of angle half), we do everything on site for speed */ void cluster_rotate(long target, struct vector gc, double max_angle, struct topo * topo, struct conf * conf) { long current,i; double vc,vs; //double quatsize; struct quat newquat; struct vector newaxis; struct vector ranvec(void); void vec_rotate(struct vector *, struct quat); // create rotation quaternion newaxis = ranvec(); /*random axes for rotation*/ // maxcos = cos(maxorient/2/180*PI); //vc = maxcos + ran2(&seed)*(1-maxcos); /*cos of angle must be bigger than maxcos and smaller than one*/ vc = cos(max_angle * ran2(&seed) ); if (ran2(&seed) <0.5) vs = sqrt(1.0 - vc*vc); else vs = -sqrt(1.0 - vc*vc); /*randomly choose orientation of direction of rotation clockwise or counterclockwise*/ newquat.w=vc; newquat.x=newaxis.x*vs; newquat.y=newaxis.y*vs; newquat.z=newaxis.z*vs; //quatsize=sqrt(newquat.w*newquat.w+newquat.x*newquat.x+newquat.y*newquat.y+newquat.z*newquat.z); //shift position to geometrical center i=0; current = topo->chainlist[target][0]; while (current >=0 ) { //shift position to geometrical center conf->particle[current].pos.x -= gc.x; conf->particle[current].pos.y -= gc.y; conf->particle[current].pos.z -= gc.z; //scale things by box not to have them distorted conf->particle[current].pos.x *= conf->box.x; conf->particle[current].pos.y *= conf->box.y; conf->particle[current].pos.z *= conf->box.z; //do rotation vec_rotate(&conf->particle[current].pos, newquat); vec_rotate(&conf->particle[current].dir, newquat); vec_rotate(&conf->particle[current].patchdir[0], newquat); 
vec_rotate(&conf->particle[current].patchdir[1], newquat); vec_rotate(&conf->particle[current].chdir[0], newquat); vec_rotate(&conf->particle[current].chdir[1], newquat); vec_rotate(&conf->particle[current].patchsides[0], newquat); vec_rotate(&conf->particle[current].patchsides[1], newquat); vec_rotate(&conf->particle[current].patchsides[2], newquat); vec_rotate(&conf->particle[current].patchsides[3], newquat); //sclae back conf->particle[current].pos.x /= conf->box.x; conf->particle[current].pos.y /= conf->box.y; conf->particle[current].pos.z /= conf->box.z; //shift positions back conf->particle[current].pos.x += gc.x; conf->particle[current].pos.y += gc.y; conf->particle[current].pos.z += gc.z; i++; current = topo->chainlist[target][i]; } } /* put the particle in the original box using periodic boundary conditions in our system the particle positions are scaled by box size so to get them into original obx is to get htem between 0 and 1 and then scale this back by size of box*/ void origbox(struct vector *pos,struct vector box) { double anint(double); (*pos).x = box.x * ((*pos).x - anint((*pos).x)); (*pos).y = box.y * ((*pos).y - anint((*pos).y)); (*pos).z = box.z * ((*pos).z - anint((*pos).z)); } /* use of periodic boundary conditions*/ void usepbc(struct vector *pos,struct vector pbc) { do { (*pos).x += pbc.x; } while ((*pos).x < 0.0); do { (*pos).x -= pbc.x; } while ((*pos).x > pbc.x); do { (*pos).y += pbc.y; } while ((*pos).y < 0.0); do { (*pos).y -= pbc.y; } while ((*pos).y > pbc.y); do { (*pos).z += pbc.z; } while ((*pos).z < 0.0); do { (*pos).z -= pbc.z; } while ((*pos).z > pbc.z); } /*..............................................................................*/ /*.......................TEMPLATE FILES.........................................*/ /*..............................................................................*/ /* # Template for the "options" file. Options start with an '#'. 
# Pressure couplings: # 0 = anisotropic coupling, 1 = isotropic coupling, 2 = isotropic in xy z=const, 3 = isotropic # xy and keep Volume constant # Wang-Landau method: (with constant decrease of bias addition by factor of 2, until less than WL_ALPHATOL) # O = none, 1 = z-direction of 1st paticle, 2 = hole in xyplane, 3 = z-orientation of 0th particle # 4 = distance of first two particles, 5 = pore around z axis and above CM, 6 = pore around z axis and above 0th particle # 7 = number of particles in contact (within distance sqrt(WL_CONTACTS)) ptype = 1 # Pressure coupling type (0-anisotropic xyz, 1-isotropic xyz, 2 - isotropic in xy z=const, 3 - isotropic in xy and V=const) press = 1 # Pressure paralpress = 1 # Parallel pressure for replica exchange shave = 0 # Average number of volume change attempts per sweep (usually 1) nequil = 0 # Number of equilibration sweeps adjust = 0 # Number of equilibration sweeps between step size adjustments nsweeps = 1000000 # Number of production sweeps paramfrq = 1000000 # Number of sweeps between order parameter samples report = 1000000 # Number of sweeps between statistics reports nrepchange = 1000 # Number of sweeps between replica exchanges movie = 100000 # Number of sweeps between movie frames (0 = no movie) chainprob = 0.0 # Probability of chain move attempts per sweep ( 0.25/number of particles in chain) transmx = 0.212 # Initial maximum displacement rotmx = 7.5 # Initial maximum orientation change (degrees) edge_mx = 0.0 # Initial maximum box length change chainmmx = 0.0 # Initial maximum chain displacement chainrmx = 0.0 # Initial maximum chain rotation change (degrees) temper = 1.0 # Temperature in units kT/e paraltemper = 1.5 # Temperature for parallel tempering in kT/e wlm = 0 # Wang-Landau method wlmtype = 0 # For which atomic type (from top.init) should the Wang-Landau method be calculated? 
switchprob = 0.0016 # Probability of type switch attempts per sweep pairlist_update = 8 # Number of sweeps after which the pairlist should be updated seed = 1 # Random number seed write_cluster = 10000 # Number of sweeps per writing out cluster info # End of the file */ /* Example of 'Config.init' file, but you must delete comments... there are only number in configuration file #box 10.0 10.0 10.0 #particles (x,y,z) (direction_x,direction_y, direction_z) (patchdirection_x,patchdirection_y,patchdirection_z) (switched) */ /* Template for the topology file 'top.init'. ( "\\" is symbol for line continue, "#" is symbol for comment, "[" is starting sign for keyword, "]" is ending sign for kyeword ) There are three keywords, types, molecules, and system. They should be given in this order. TYPES: spherocylinders SC - purely repulsive spherocylinder with WCA potential on closest distance SCA - isotropic cos^2 potential is acting isotropicaly dependent only on closest distance between spherocylinders.. PSC - Attractive potential in limited to an angular wedge on spherocylinder. Patch goes all the way through, making also hemispherical caps on end attractive CPSC - Attractive potential in limited to an angular wedge on cylindrical part of spherocylinders. 
The hemispherical caps on ends are repulsive spheres (T)(CH)PSC - T adds second patch, CH - adds chirality SP - purely repulsive shpere with WCA potential on closest distance SPA - isotropic cos^2 potential is acting isotropicaly dependent only on closest distance between obejcts [Types] # NAME NUMBER GEOTYPE EPSILON SIGMA ATTRACTION_DIST ATTRACTION_SWITCH PATCH_ANGLE PATCH_SWITCH SC_LENGTH (Optional second patch: PATCH_ROTATION PATCH_ANGLE PATCH_SWITCH )CHIRAL_ANGLE Prot1 1 PSC 1 1.2 1.346954458 1.0 80.0 5.0 3 Prot2 2 PSC 1 1.2 1.346954458 1.0 170.0 5.0 3 Prot3 3 CHCPSC 1 1.2 1.346954458 1.0 170.0 5.0 3 10 Prot4 4 TCHCPSC 1 1.2 1.346954458 1.0 170.0 5.0 3 90.0 90.0 5.0 10 [Molecules] # Molecules letter # bond1 - harmonic bond between nearest neighbours (end points for spherocylinders) (first constant then eq distance) # bond2 - harmonic bond between second nearest neighbours (their center of mass) (first constant then eq distance) # bondd - directional harmonic bond between nearest neighbours (end point of the second spherocylinder is attached to the point of bondlength extension of the first spherocylinder) (first constant then eq distance) # angle1 - angle between two spherocylinders -nearest neighbours (first constant then eq degrees 0-180.0) # angle2 - angle between two spherocylinder patches -nearest neighbours (first constant then eq degrees 0-180.0) # particles - types as they go in chain in molecule A: { #what: TYPE SWITCHTYPE DELTA_MU particles: 1 2 0.5 particles: 2 } B: { particles: 1 particles: 2 1 0.3 } [System] A 2 B 2 [EXTER] # wall interaction # THICKNESS EPSILON ATTRACTION_SWITCH 5.0 1.0 1.0 [EXCLUDE] #set pair types for which attraction will be excluded (reversepair is automaticaly added) 1 2 1 3 */
7744.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "3mm.h" /* Array initialization. */ static void init_array(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nk; j++) A[i][j] = ((DATA_TYPE) i*j) / ni; for (i = 0; i < nk; i++) for (j = 0; j < nj; j++) B[i][j] = ((DATA_TYPE) i*(j+1)) / nj; for (i = 0; i < nj; i++) for (j = 0; j < nm; j++) C[i][j] = ((DATA_TYPE) i*(j+3)) / nl; for (i = 0; i < nm; i++) for (j = 0; j < nl; j++) D[i][j] = ((DATA_TYPE) i*(j+2)) / nk; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nl, DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nl; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]); if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ static void kernel_3mm(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl), DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j, k; #pragma scop #pragma omp parallel private (i, j, k) num_threads(#P11) { /* E := A*B */ #pragma omp target teams distribute for (i = 0; i < _PB_NI; i++) { #pragma omp parallel for simd num_threads(8) for (j = 0; j < _PB_NJ; j++) { E[i][j] = 0; for (k = 0; k < _PB_NK; ++k) E[i][j] += A[i][k] * B[k][j]; } } /* F := C*D */ #pragma omp target teams distribute for (i = 0; i < _PB_NJ; i++) { #pragma omp parallel for simd num_threads(8) for (j = 0; j < _PB_NL; j++) { F[i][j] = 0; for (k = 0; k < _PB_NM; ++k) F[i][j] += C[i][k] * D[k][j]; } } /* G := E*F */ #pragma omp target teams distribute for (i = 0; i < _PB_NI; i++) { #pragma omp parallel for simd num_threads(8) for (j = 0; j < _PB_NL; j++) { G[i][j] = 0; for (k = 0; k < _PB_NJ; ++k) G[i][j] += E[i][k] * F[k][j]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; int nk = NK; int nl = NL; int nm = NM; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj); POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl); POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm); POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl); POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl); /* Initialize array(s). */ init_array (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D)); /* Start timer. */ polybench_start_instruments; /* Run kernel. 
*/ kernel_3mm (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(E), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(F), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D), POLYBENCH_ARRAY(G)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G))); /* Be clean. */ POLYBENCH_FREE_ARRAY(E); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(F); POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(D); POLYBENCH_FREE_ARRAY(G); return 0; }
declare_variant_if.c
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <omp.h>

// --- start of saxpy header with variants ---
//
// saxpy() is the base function.  The two "declare variant" directives below
// tell the compiler to substitute nvptx_saxpy() when the call site is
// compiled for NVPTX targets and amdgcn_saxpy() for AMDGCN targets.
int saxpy(int, float, float *, float *);
int amdgcn_saxpy(int, float, float *, float *);
int nvptx_saxpy(int, float, float *, float *);

#pragma omp declare variant(nvptx_saxpy) \
    match(device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})
#pragma omp declare variant( amdgcn_saxpy ) \
    match(device = {arch(amdgcn)}, implementation = {extension(match_any)})
/// Base function: computes y[i] = s*x[i] + y[i] for i in [0, n) on the host.
/// Returns 1 so callers can tell host execution apart from the device
/// variants, which return 0.
int saxpy(int n, float s, float *x, float *y) // base function
{
    printf("saxpy: Running on host . IsHost:%d\n", omp_is_initial_device());
    #pragma omp parallel for
    for(int i=0; i<n; i++)
        y[i] = s*x[i] + y[i];
    return 1;
}

/// AMDGCN device variant of saxpy(). Returns 0.
int amdgcn_saxpy(int n, float s, float *x, float *y) //function variant
{
    // Fixed log message: previously misspelled as "amdgcn_saxpY".
    printf("amdgcn_saxpy: Running on amdgcn device. IsHost:%d\n",
           omp_is_initial_device());
    #pragma omp teams distribute parallel for
    for(int i=0; i<n; i++) {
        y[i] = s*x[i] + y[i];
    }
    return 0;
}

/// NVPTX device variant of saxpy(). Returns 0.
int nvptx_saxpy(int n, float s, float *x, float *y) //function variant
{
    printf("nvptx_saxpy: Running on nvptx device. IsHost:%d\n",
           omp_is_initial_device());
    #pragma omp teams distribute parallel for
    for(int i=0; i<n; i++)
        y[i] = s*x[i] + y[i];
    return 0;
}
// --- end of saxpy header with variants ----

#define N 128
#define THRESHOLD 127

int main()
{
    // NOTE(review): the aligned(64) attribute in this declaration binds to y
    // only, not to x -- confirm whether both arrays were meant to be aligned.
    static float x[N],y[N] __attribute__ ((aligned(64)));
    float s=2.0;
    int return_code = 0 ;

    for(int i=0; i<N; i++){ x[i]=i+1; y[i]=i+1; } // initialize

    // First call: N > THRESHOLD*2 is false, so the target region executes on
    // the host and the base saxpy() is used.
    printf("Calling saxpy with high threshold for device execution\n");
    #pragma omp target if (N>(THRESHOLD*2))
    return_code = saxpy(N,s,x,y);

    // Second call: N > THRESHOLD is true, so the region is offloaded and the
    // matching device variant (nvptx/amdgcn) is selected.
    printf("Calling saxpy with low threshold for device execution\n");
    #pragma omp target if (N>THRESHOLD)
    return_code = saxpy(N,s,x,y);

    // Each call adds 2*(i+1), so y[i] = (i+1) + 2*(i+1) + 2*(i+1) = 5*(i+1).
    printf("y[0],y[N-1]: %5.0f %5.0f\n",y[0],y[N-1]); //output: y... 5 640
    return return_code;
}
omp_for_bigbounds.c
// RUN: %libomp-compile -DMY_SCHEDULE=static && %libomp-run // RUN: %libomp-compile -DMY_SCHEDULE=dynamic && %libomp-run // RUN: %libomp-compile -DMY_SCHEDULE=guided && %libomp-run // Only works with Intel Compiler since at least version 15.0 and clang since // version 11. // XFAIL: gcc, clang-3, clang-4, clang-5, clang-6, clang-7, clang-8, clang-9, clang-10 /* * Test that large bounds are handled properly and calculations of * loop iterations don't accidentally overflow */ #include <stdio.h> #include <omp.h> #include <stdlib.h> #include <limits.h> #include "omp_testsuite.h" #define INCR 50000000 #define MY_MAX 2000000000 #define MY_MIN -2000000000 #ifndef MY_SCHEDULE # define MY_SCHEDULE static #endif int a, b, a_known_value, b_known_value; int test_omp_for_bigbounds() { a = 0; b = 0; #pragma omp parallel { int i; #pragma omp for schedule(MY_SCHEDULE) for (i = INT_MIN; i < MY_MAX; i+=INCR) { #pragma omp atomic a++; } #pragma omp for schedule(MY_SCHEDULE) for (i = INT_MAX; i >= MY_MIN; i-=INCR) { #pragma omp atomic b++; } } printf("a = %d (should be %d), b = %d (should be %d)\n", a, a_known_value, b, b_known_value); return (a == a_known_value && b == b_known_value); } int main() { int i; int num_failed=0; a_known_value = 0; for (i = INT_MIN; i < MY_MAX; i+=INCR) { a_known_value++; } b_known_value = 0; for (i = INT_MAX; i >= MY_MIN; i-=INCR) { b_known_value++; } for(i = 0; i < REPETITIONS; i++) { if(!test_omp_for_bigbounds()) { num_failed++; } } return num_failed; }
cast_ref.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: jiejun@openailab.com
 */

#include "cast_param.h"

#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"

#include <math.h>
#include <string.h>

/* Reference (CPU) implementation of the Cast operator: copies the input
 * tensor into the output tensor, converting the element data type. */

/* No per-node state is needed for this reference op. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Converts input elements to the output tensor's data type.
 * Supported pairs: FP32<->FP16 and, when exactly one quantisation parameter
 * is present, FP32<->UINT8.  Identical types degrade to a plain memcpy.
 * Returns 0 on success, -1 on shape/layout mismatch or an unsupported pair. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor;
    struct tensor* output_tensor;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    /* The conversion is driven entirely by the two tensors' data types; an
     * unused local that read ir_node->op.param_mem (cast_param) was removed. */
    int type_from = input_tensor->data_type;
    int type_to = output_tensor->data_type;
    int num_thread = exec_graph->num_thread;

    /* element count and rank must agree */
    if (input_tensor->elem_num != output_tensor->elem_num || input_tensor->dim_num != output_tensor->dim_num)
    {
        return -1;
    }

    /* same type: plain copy */
    if (type_from == type_to)
    {
        memcpy(output_tensor->data, input_tensor->data, input_tensor->elem_num * input_tensor->elem_size);
        return 0;
    }

    /* every dimension must match */
    for (uint8_t i = 0; i < input_tensor->dim_num; i++)
    {
        if (input_tensor->dims[i] != output_tensor->dims[i])
            return -1;
    }

    if (input_tensor->layout != output_tensor->layout)
    {
        return -1;
    }

    /* FP32 -> FP16 */
    if (type_from == TENGINE_DT_FP32 && type_to == TENGINE_DT_FP16)
    {
        fp32_t* idata = (fp32_t*)input_tensor->data;
        fp16_t* odata = (fp16_t*)output_tensor->data;
#pragma omp parallel for num_threads(num_thread)
        for (uint32_t i = 0; i < input_tensor->elem_num; i++)
        {
            odata[i] = fp32_to_fp16(idata[i]);
        }
        return 0;
    }

    /* FP16 -> FP32 */
    if (type_from == TENGINE_DT_FP16 && type_to == TENGINE_DT_FP32)
    {
        fp16_t* idata = (fp16_t*)input_tensor->data;
        fp32_t* odata = (fp32_t*)output_tensor->data;
#pragma omp parallel for num_threads(num_thread)
        for (uint32_t i = 0; i < input_tensor->elem_num; i++)
        {
            odata[i] = fp16_to_fp32(idata[i]);
        }
        return 0;
    }

    /* FP32 -> UINT8: quantize with the input's scale/zero_point, clamping
     * the rounded value to [0, 255]. */
    if (type_from == TENGINE_DT_FP32 && type_to == TENGINE_DT_UINT8)
    {
        float* idata = (float*)input_tensor->data;
        uint8_t* odata = (uint8_t*)output_tensor->data;
        if (1 == input_tensor->quant_param_num)
        {
            float scale = input_tensor->scale;
            int zero_point = input_tensor->zero_point;
#pragma omp parallel for num_threads(num_thread)
            for (uint32_t i = 0; i < input_tensor->elem_num; i++)
            {
                int val = (int)(roundf(idata[i] / scale)) + zero_point;
                if (255 >= val && 0 <= val)
                    odata[i] = (uint8_t)val;
                else
                {
                    if (255 < val)
                        odata[i] = 255;
                    if (0 > val)
                        odata[i] = 0;
                }
            }
            return 0;
        }
    }

    /* UINT8 -> FP32: dequantize with the input's scale/zero_point. */
    if (type_from == TENGINE_DT_UINT8 && type_to == TENGINE_DT_FP32)
    {
        uint8_t* idata = (uint8_t*)input_tensor->data;
        float* odata = (float*)output_tensor->data;
        if (1 == input_tensor->quant_param_num)
        {
            float scale = input_tensor->scale;
            int zero_point = input_tensor->zero_point;
#pragma omp parallel for num_threads(num_thread)
            for (uint32_t i = 0; i < input_tensor->elem_num; i++)
            {
                odata[i] = (float)(idata[i] - zero_point) * scale;
            }
            return 0;
        }
    }

    /* unsupported conversion pair (or missing quantisation parameters) */
    return -1;
}

/* Cast never changes the shape: propagate the input dims to the output. */
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* node = exec_node->ir_node;
    struct graph* ir_graph = node->graph;
    struct tensor* input = get_ir_graph_tensor(ir_graph, node->input_tensors[0]);
    struct tensor* output = get_ir_graph_tensor(ir_graph, node->output_tensors[0]);

    int ret = set_ir_tensor_shape(output, input->dims, input->dim_num);
    return ret;
}

static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    (void)node_ops;
    (void)exec_graph;
    (void)exec_node;
    return OPS_SCORE_CANDO;
}

static struct node_ops ref_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = reshape,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

int register_cast_ref_op(void* arg)
{
    return register_builtin_node_ops(OP_CAST, &ref_node_ops);
}

int unregister_cast_ref_op(void* arg)
{
    return unregister_builtin_node_ops(OP_CAST, &ref_node_ops);
}
convolution_1x1_pack1to4_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv1x1s1_sgemm_pack1to4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; const int size = w * h; Mat bottom_im2col = bottom_blob; bottom_im2col.w = size; bottom_im2col.h = 1; im2col_sgemm_pack1to4_bf16s_neon(bottom_im2col, top_blob, kernel, _bias, opt); } static void conv1x1s2_sgemm_pack1to4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int channels = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; const int tailstep = w - 2 * outw + w; Mat bottom_blob_shrinked; bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < channels; p++) { const unsigned short* r0 = bottom_blob.channel(p); unsigned short* outptr = bottom_blob_shrinked.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { outptr[0] = r0[0]; r0 += 2; outptr += 1; } r0 += tailstep; } } conv1x1s1_sgemm_pack1to4_bf16s_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt); }
fft3d.h
/* * fft3d.h * * Copyright (C) 2014 Diamond Light Source * * Author: Richard Gildea * * This code is distributed under the BSD license, a copy of which is * included in the root directory of this package. */ #ifndef DIALS_ALGORITHMS_INTEGRATION_FFT3D_H #define DIALS_ALGORITHMS_INTEGRATION_FFT3D_H #include <stdio.h> #include <iostream> #include <cmath> #include <scitbx/vec2.h> #include <scitbx/array_family/flex_types.h> #include <scitbx/math/utils.h> #include <cstdlib> #include <scitbx/array_family/versa_matrix.h> #include <dials/array_family/scitbx_shared_and_versa.h> #include <dials/algorithms/spot_prediction/rotation_angles.h> #include <dxtbx/model/scan_helpers.h> namespace dials { namespace algorithms { using dxtbx::model::is_angle_in_range; // helper function for sampling_volume_map bool are_angles_in_range(af::ref<vec2<double> > const& angle_ranges, vec2<double> const& angles) { for (std::size_t i = 0; i < 2; i++) { double angle = angles[i]; for (std::size_t j = 0; j < angle_ranges.size(); j++) { if (is_angle_in_range(angle_ranges[j], angle)) { return true; } } } return false; } // compute a map of the sampling volume of a scan void sampling_volume_map(af::ref<double, af::c_grid<3> > const& data, af::ref<vec2<double> > const& angle_ranges, vec3<double> s0, vec3<double> m2, double const& rl_grid_spacing, double d_min, double b_iso) { typedef af::c_grid<3>::index_type index_t; index_t const gridding_n_real = index_t(data.accessor()); RotationAngles calculate_rotation_angles_(s0, m2); double one_over_d_sq_min = 1 / (d_min * d_min); for (std::size_t i = 0; i < gridding_n_real[0]; i++) { double i_rl = (double(i) - double(gridding_n_real[0] / 2.0)) * rl_grid_spacing; double i_rl_sq = i_rl * i_rl; for (std::size_t j = 0; j < gridding_n_real[1]; j++) { double j_rl = (double(j) - double(gridding_n_real[1] / 2.0)) * rl_grid_spacing; double j_rl_sq = j_rl * j_rl; for (std::size_t k = 0; k < gridding_n_real[2]; k++) { double k_rl = (double(k) - double(gridding_n_real[2] / 
2.0)) * rl_grid_spacing; double k_rl_sq = k_rl * k_rl; double reciprocal_length_sq = (i_rl_sq + j_rl_sq + k_rl_sq); if (reciprocal_length_sq > one_over_d_sq_min) { continue; } vec3<double> pstar0(i_rl, j_rl, k_rl); // Try to calculate the diffracting rotation angles vec2<double> phi; try { phi = calculate_rotation_angles_(pstar0); } catch (error) { continue; } // Check that the angles are within the rotation range if (are_angles_in_range(angle_ranges, phi)) { double T; if (b_iso != 0) { T = std::exp(-b_iso * reciprocal_length_sq / 4); } else { T = 1; } data(i, j, k) = T; } } } } } /* Peak-finding algorithm inspired by the CLEAN algorithm of Högbom, J. A. 1974, A&AS, 15, 417. See also: https://doi.org/10.1051/0004-6361/200912148 */ af::shared<vec3<int> > clean_3d( af::const_ref<double, af::c_grid<3> > const& dirty_beam, af::ref<double, af::c_grid<3> > const& dirty_map, std::size_t n_peaks, double gamma = 1) { af::shared<vec3<int> > peaks; typedef af::c_grid<3>::index_type index_t; index_t const gridding_n_real = index_t(dirty_map.accessor()); DIALS_ASSERT(dirty_map.size() == dirty_beam.size()); double max_db = af::max(dirty_beam); af::c_grid<3> accessor(dirty_map.accessor()); // index_type conversion const int height = int(gridding_n_real[0]); const int depth = int(gridding_n_real[1]); const int width = int(gridding_n_real[2]); const long height_depth = height * depth; int max_idx = af::max_index(dirty_map); for (std::size_t i_peak = 0; i_peak < n_peaks; i_peak++) { // Find the maximum value in the map - this is the next "peak" const index_t shift = accessor.index_nd(max_idx); peaks.push_back(vec3<int>(shift)); // reposition the dirty beam on the current peak and subtract from // the dirty map const double max_value = dirty_map[max_idx]; const double scale = max_value / max_db * gamma; max_idx = 0; // reset for next cycle #pragma omp parallel for for (int i = 0; i < width; i++) { int i_db = i - shift[0]; if (i_db < 0) { i_db += width; } else if (i_db >= width) { 
i_db -= width; } // DIALS_ASSERT(i_db >= 0 && i_db < width); const long ipart_dm = i * height_depth; const long ipart_db = i_db * height_depth; for (int j = 0; j < height; j++) { int j_db = j - shift[1]; if (j_db < 0) { j_db += height; } else if (j_db >= height) { j_db -= height; } // DIALS_ASSERT(j_db >= 0 && j_db < height); const long ijpart_dm = ipart_dm + j * depth; const long ijpart_db = ipart_db + j_db * depth; for (int k = 0; k < depth; k++) { int k_db = k - shift[2]; if (k_db < 0) { k_db += depth; } else if (k_db >= depth) { k_db -= depth; } // DIALS_ASSERT(k_db >= 0 && k_db < depth); const long idx_dm = ijpart_dm + k; const long idx_db = ijpart_db + k_db; dirty_map[idx_dm] -= dirty_beam[idx_db] * scale; if (dirty_map[max_idx] < dirty_map[idx_dm]) #pragma omp critical(max_idx) { max_idx = idx_dm; } } } } } return peaks; } void map_centroids_to_reciprocal_space_grid( af::ref<double, af::c_grid<3> > const& grid, af::const_ref<vec3<double> > const& reciprocal_space_vectors, af::ref<bool> const& selection, double d_min, double b_iso = 0) { typedef af::c_grid<3>::index_type index_t; index_t const gridding_n_real = index_t(grid.accessor()); DIALS_ASSERT(d_min >= 0); DIALS_ASSERT(gridding_n_real[0] == gridding_n_real[1]); DIALS_ASSERT(gridding_n_real[0] == gridding_n_real[2]); const int n_points = gridding_n_real[0]; const double rlgrid = 2 / (d_min * n_points); const double one_over_rlgrid = 1 / rlgrid; const int half_n_points = n_points / 2; for (int i = 0; i < reciprocal_space_vectors.size(); i++) { if (!selection[i]) { continue; } const vec3<double> v = reciprocal_space_vectors[i]; const double v_length = v.length(); const double d_spacing = 1 / v_length; if (d_spacing < d_min) { selection[i] = false; continue; } vec3<int> coord; for (int j = 0; j < 3; j++) { coord[j] = scitbx::math::iround(v[j] * one_over_rlgrid) + half_n_points; } if ((coord.max() >= n_points) || coord.min() < 0) { selection[i] = false; continue; } double T; if (b_iso != 0) { T = 
std::exp(-b_iso * v_length * v_length / 4.0); } else { T = 1; } grid(coord) = T; } } }} // namespace dials::algorithms #endif
daxpy.c
/* daxpy: computes y = a*x + y twice -- once with OpenMP on the host CPU
 * cores and once with OpenACC on the accelerator -- then compares the two
 * results elementwise.  Exit status 0 on agreement, 1 otherwise. */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h> /* fixed: time() was used without being declared */

#define N 20480

int main(void)
{
    double *x, *y;
    size_t size = N*sizeof(double);

    x = (double *)malloc(size);
    y = (double *)malloc(size);
    if (x == NULL || y == NULL) {
        fprintf(stderr, "allocation failed\n");
        return 1;
    }

    /* initialize x and y with uniform pseudo-random values in [0, 1].
     * Fixed: the original seeded with srand() but drew with random(), which
     * is seeded by srandom() -- the seed had no effect.  Use the matching
     * (and portable) srand()/rand() pair instead. */
    srand(time(NULL));
    double a = (double)rand() / RAND_MAX;
    int i;
    for (i=0; i<N; i++)
        x[i] = (double)rand() / RAND_MAX;
    for (i=0; i<N; i++)
        y[i] = (double)rand() / RAND_MAX;

    /* compute axpy on the host CPU cores */
    double *yomp;
    yomp = (double *)malloc(size);
    if (yomp == NULL) {
        fprintf(stderr, "allocation failed\n");
        return 1;
    }
    #pragma omp parallel for
    for (i=0; i<N; i++) {
        yomp[i] = a * x[i] + y[i];
    }

    /* compute axpy on the accelerator */
    double *yacc;
    yacc = (double *)malloc(size);
    if (yacc == NULL) {
        fprintf(stderr, "allocation failed\n");
        return 1;
    }
    #pragma acc kernels copyin(x[0:N],y[0:N]), copyout(yacc[0:N])
    for (i=0; i<N; i++) {
        yacc[i] = a*x[i] + y[i];
    }

    /* verify the results: m = maximum relative difference */
    double m = -1.;
    double tmp;
    #pragma omp parallel for private(tmp) reduction(max:m)
    for (i=0; i<N; i++) {
        tmp = fabs( (yacc[i]-yomp[i])/yomp[i] );
        if ( tmp > m ) m = tmp;
    }

    /* release memory */
    free(x);
    free(y);
    free(yomp);
    free(yacc);

    if ( m < 1E-12 ) {
        printf("Success!\n");
        return 0;
    }
    else {
        printf("Failure!\n");
        return 1;
    }
}
spoinv.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zpoinv.c, normal z -> s, Fri Sep 28 17:38:09 2018
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_poinv
 *
 * Performs the Cholesky inversion of a symmetric positive definite
 * matrix A.
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
 *
 * @param[in] n
 *          The order of the matrix A. n >= 0.
 *
 * @param[in,out] pA
 *          On entry, the symmetric positive definite matrix A.
 *          If uplo = PlasmaUpper, the leading N-by-N upper triangular part
 *          of A contains the upper triangular part of the matrix A, and the
 *          strictly lower triangular part of A is not referenced.
 *          If uplo = PlasmaLower, the leading N-by-N lower triangular part
 *          of A contains the lower triangular part of the matrix A, and the
 *          strictly upper triangular part of A is not referenced.
 *          On exit, if return value = 0, the inverse of A following a
 *          Cholesky factorization A = U^T*U or A = L*L^T.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= max(1,n).
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 * @retval > 0 if i, the leading minor of order i of A is not
 *         positive definite, so the factorization could not
 *         be completed, and the solution has not been computed.
 *
 *******************************************************************************
 *
 * @sa plasma_cpoinv
 * @sa plasma_dpoinv
 * @sa plasma_zpoinv
 *
 ******************************************************************************/
int plasma_spoinv(plasma_enum_t uplo, int n, float *pA, int lda)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        return -4;
    }

    // quick return
    if (imax(n, 0) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_poinv(plasma, PlasmaRealFloat, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrix.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_triangular_create(PlasmaRealFloat, uplo, nb, nb,
                                           n, n, 0, 0, n, n, &A);
    if (retval != PlasmaSuccess) {
        // Fixed: the message previously named plasma_desc_general_create(),
        // which is not the function called above.
        plasma_error("plasma_desc_triangular_create() failed");
        return retval;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {  // previously ignored
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {  // previously ignored
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_str2desc(pA, lda, A, &sequence, &request);

        // Call the tile async function.
        plasma_omp_spoinv(uplo, A, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_sdesc2tr(A, pA, lda, &sequence, &request);
    }
    // implicit synchronization

    // Free matrix A in tile layout.
    plasma_desc_destroy(&A);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_poinv
 *
 * Computes the inverse of a real symmetric positive definite matrix A
 * using the Cholesky factorization.  (Doc fixed: the precision generator
 * had left "complex" in this real-valued variant.)
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
 *
 * @param[in] A
 *          On entry, the symmetric positive definite matrix A.
 *          On exit, the upper or lower triangle of the (symmetric)
 *          inverse of A, overwriting the input factor U or L.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs
 *          to (for completion checks and exception handling purposes).
 *          Check the sequence->status for errors.
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values. The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_spoinv
 * @sa plasma_omp_cpoinv
 * @sa plasma_omp_dpoinv
 * @sa plasma_omp_zpoinv
 *
 ******************************************************************************/
void plasma_omp_spoinv(plasma_enum_t uplo, plasma_desc_t A,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.n == 0) {
        return;
    }

    // Factorize A.
    plasma_pspotrf(uplo, A, sequence, request);

    // Invert triangular part.
    plasma_pstrtri(uplo, PlasmaNonUnit, A, sequence, request);

    // Compute product of upper and lower triangle.
    plasma_pslauum(uplo, A, sequence, request);
}
non_simd.c
#include<stdio.h> #include<stdlib.h> #include<sys/time.h> #include <omp.h> #define REPS 10000 double t0; double mysecond() { struct timeval tp; struct timezone tzp; int i; i = gettimeofday(&tp,&tzp); return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 ); } int main(int argc, char *argv[]) { int M = atoi(argv[1]); // size of vectors int N = atoi(argv[2]); // number of OpenMP threads float*a, *b; a = (float*) malloc(sizeof(float) * M); b = (float*) malloc(sizeof(float) * M); int i, j, k; for (i = 0; i < M; i++) { a[i] = i; b[i] = i + 3; } omp_set_num_threads(N); float sum = 0; t0 = mysecond(); for (j = 0; j < REPS; j++) { #pragma omp parallel for reduction(+:sum) for (i = k; i < M; i++) sum += a[i] * b[i]; } t0 = (mysecond() - t0) * 1.e3; fprintf(stdout, "result = %1.3e\n", sum); fprintf(stdout, "parallel loop = %3.2f ms\n", t0); return 0; }
generator_spgemm_csr_asparse.c
/******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved.                      *
* This file is part of the LIBXSMM library.                                   *
*                                                                             *
* For information on the license, see the LICENSE file.                       *
* Further information: https://github.com/libxsmm/libxsmm/                    *
* SPDX-License-Identifier: BSD-3-Clause                                       *
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include "generator_spgemm_csr_asparse.h"
#include "generator_common.h"
#include "libxsmm_main.h"

/* Emits C source for a sparse(A, CSR) * dense(B) = dense(C) kernel: the rows
 * of A are fully unrolled into scalar update statements inside a single loop
 * over the n columns of B/C.  Also emits an optional beta=0 reset of C and a
 * flop counter for debug builds. */
LIBXSMM_API_INTERN
void libxsmm_generator_spgemm_csr_asparse( libxsmm_generated_code*        io_generated_code,
                                           const libxsmm_gemm_descriptor* i_xgemm_desc,
                                           const char*                    i_arch,
                                           const unsigned int*            i_row_idx,
                                           const unsigned int*            i_column_idx,
                                           const double*                  i_values ) {
  unsigned int l_m;
  unsigned int l_z;
  unsigned int l_row_elements;
  unsigned int l_flop_count = 0;

  char l_new_code[512];
  int l_max_code_length = 511;
  int l_code_length = 0;

  LIBXSMM_UNUSED(i_values);

  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  unsigned int l_n = 0;\n");
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );

  /* reset C if beta is zero */
  if (0 != (LIBXSMM_GEMM_FLAG_BETA_0 & i_xgemm_desc->flags)) { /* Beta=0 */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  unsigned int l_m = 0;\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  for ( l_m = 0; l_m < %u; l_m++) {\n", (unsigned int)i_xgemm_desc->m);
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    if ( i_xgemm_desc->m > 1 ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    #pragma simd\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    #pragma vector aligned\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    }
    /* zero literal matches the output precision */
    if ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    for ( l_n = 0; l_n < %u; l_n++) { C[(l_m*%u)+l_n] = 0.0; }\n", (unsigned int)i_xgemm_desc->ldc, (unsigned int)i_xgemm_desc->ldc);
    } else {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    for ( l_n = 0; l_n < %u; l_n++) { C[(l_m*%u)+l_n] = 0.0f; }\n", (unsigned int)i_xgemm_desc->ldc, (unsigned int)i_xgemm_desc->ldc);
    }
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  }\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  }

  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "\n");
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );

  /* determine the correct simd pragma for each architecture */
  if ( ( strcmp( i_arch, "noarch" ) == 0 ) ||
       ( strcmp( i_arch, "wsm" ) == 0 )    ||
       ( strcmp( i_arch, "snb" ) == 0 )    ||
       ( strcmp( i_arch, "hsw" ) == 0 )       ) {
    if ( i_xgemm_desc->n > 7 ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  #pragma simd vectorlength(8)\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    } else if ( i_xgemm_desc->n > 3 ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  #pragma simd vectorlength(4)\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    } else if ( i_xgemm_desc->n > 1 ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  #pragma simd vectorlength(2)\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    } else {}
  } else if ( ( strcmp( i_arch, "knl" ) == 0 ) ||
              ( strcmp( i_arch, "knm" ) == 0 ) ||
              ( strcmp( i_arch, "skx" ) == 0 ) ||
              ( strcmp( i_arch, "clx" ) == 0 ) ||
              ( strcmp( i_arch, "cpx" ) == 0 ) ) {
    if ( (i_xgemm_desc->n > 1) ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  #pragma simd vectorlength(16)\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    }
  } else {
    LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_ARCH );
    return;
  }

  if ( (i_xgemm_desc->n > 1)                                      &&
       ((LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0)   &&
       ((LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0)      ) {
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  #pragma vector aligned\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  }

  /* generate the actual kernel */
  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  for ( l_n = 0; l_n < %u; l_n++) {\n", (unsigned int)i_xgemm_desc->n);
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );

  for ( l_m = 0; l_m < (unsigned int)i_xgemm_desc->m; l_m++ ) {
    l_row_elements = i_row_idx[l_m+1] - i_row_idx[l_m];
    for ( l_z = 0; l_z < l_row_elements; l_z++ ) {
      /* check k such that we just use columns which actually need to be multiplied */
      if ( i_column_idx[i_row_idx[l_m] + l_z] < (unsigned int)i_xgemm_desc->k ) {
        l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    C[%u+l_n] += A[%u] * B[%u+l_n];\n", l_m * i_xgemm_desc->ldc, i_row_idx[l_m] + l_z, i_column_idx[i_row_idx[l_m] + l_z]*i_xgemm_desc->ldb );
        libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
        l_flop_count += 2; /* one mul + one add per emitted statement */
      }
    }
  }

  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  }\n");
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );

  /* add flop counter.
   * BUG FIX: each emitted statement executes once per iteration of the l_n
   * loop (n iterations), so the accumulated per-statement count scales with
   * n, not m (the row unrolling is already folded into l_flop_count). */
  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "\n#ifndef NDEBUG\n#ifdef _OPENMP\n#pragma omp atomic\n#endif\nlibxsmm_num_total_flops += %u;\n#endif\n", l_flop_count * (unsigned int)i_xgemm_desc->n);
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
kdGroupFinder_omp2.c
// Initialization // #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <time.h> #include <assert.h> #include <sys/time.h> #include <omp.h> #include "nrutil.h" #include "kdtree.h" #include "groups.h" struct galaxy *GAL; int NGAL; int OUTPUT=0; /* Local functions */ float angular_separation(float a1, float d1, float a2, float d2); void find_satellites(int icen, void *kd); float radial_probability(float mass, float dr, float rad, float ang_rad); float fluxlim_correction(float z); void groupfind(void); /* Variables for determining * if a galaxy is a satellite */ float BPROB=10; float BPROB_RED = 5, BPROB_XRED=0; float BPROB_BLUE = 15, BPROB_XBLUE=0; /* Variables for weighting the * central galaxies of the blue galaxies */ float WCEN_MASS = 10.5, WCEN_SIG = 0.5, WCEN_MASSR = 10.5, WCEN_SIGR = 1.0, WCEN_NORM = 1; float MINREDSHIFT; float MAXREDSHIFT; float FRAC_AREA; int FLUXLIM, COLOR; int ARGC; char **ARGV; int main(int argc, char **argv) { int i,istep,istart; double t0,t1; if(argc<5) { fprintf(stderr,"kdGroupFinder inputfile zmin zmax frac_area [fluxlim] [color] [wcenvalues 1-3] [pbvalues 1-4]> out\n"); exit(0); } ARGC = argc; ARGV = argv; MINREDSHIFT = atof(argv[2]); MAXREDSHIFT = atof(argv[3]); FRAC_AREA = atof(argv[4]); if(argc>5) FLUXLIM = atoi(argv[5]); if(argc>6) COLOR = atoi(argv[6]); if(argc>7) { WCEN_MASS = atof(argv[7]); WCEN_SIG = atof(argv[8]); WCEN_MASSR = atof(argv[9]); WCEN_SIGR = atof(argv[10]); } if(argc>11) { BPROB_RED = atof(argv[11]); BPROB_XRED = atof(argv[12]); BPROB_BLUE = atof(argv[13]); BPROB_XBLUE = atof(argv[14]); } fprintf(stderr,"input> %f %f %f %d\n",MINREDSHIFT, MAXREDSHIFT, FRAC_AREA, FLUXLIM); fprintf(stderr,"input> %f %f %f\n",WCEN_MASS, WCEN_SIG, WCEN_NORM); fprintf(stderr,"input> %f %f %f %f\n",BPROB_RED,BPROB_XRED,BPROB_BLUE,BPROB_XBLUE); OUTPUT = 1; groupfind(); OUTPUT = 0; tabulate_hods(); lsat_model(); populate_simulation(-1,0); t0 = omp_get_wtime(); #pragma omp parallel 
private(i,istart,istep) { istart = omp_get_thread_num(); istep = omp_get_num_threads(); for(i=istart;i<10;i+=istep) { populate_simulation(i/2,i%2); } } t1 = omp_get_wtime(); fprintf(stderr,"popsim> %.2f sec\n",t1-t0); } void groupfind() { FILE *fp; char aa[1000]; int i, i1, niter, MAX_ITER=5, j, ngrp_prev; float frac_area, zmin, zmax, nsat_tot; int fluxlim = 0, colors = 1; double galden, pt[3]; long IDUM1 = -555; double t0,t1,t3,t4; static int *permanent_id, *itmp, *flag; static float volume, *xtmp, *lumshift; static void *kd; static int first_call=1, ngrp; if(first_call) { colors = COLOR; first_call = 0; fp = openfile(ARGV[1]); NGAL = filesize(fp); fprintf(stderr,"Allocating space for [%d] galaxies\n",NGAL); GAL = calloc(NGAL, sizeof(struct galaxy)); flag = ivector(0,NGAL-1); fluxlim = FLUXLIM; zmin = MINREDSHIFT; zmax = MAXREDSHIFT; // calculate the volume of the sample volume = 4./3.*PI*(pow(distance_redshift(zmax),3.0))*FRAC_AREA; volume = volume - 4./3.*PI*(pow(distance_redshift(zmin),3.0))*FRAC_AREA; for(i=0;i<=NGAL;++i) { fscanf(fp,"%f %f %f %f",&GAL[i].ra,&GAL[i].dec,&GAL[i].redshift,&GAL[i].mstellar); GAL[i].ra *= PI/180.; GAL[i].dec *= PI/180.; GAL[i].id = i; GAL[i].rco = distance_redshift(GAL[i].redshift); // check if the stellar mass is in log if(GAL[i].mstellar<100) GAL[i].mstellar = pow(10.0,GAL[i].mstellar); // check to see if we're doing a fluxlimited sample if(fluxlim) fscanf(fp,"%f",&GAL[i].vmax); else GAL[i].vmax = volume; // check to see if we're using colors if(colors) fscanf(fp,"%f",&GAL[i].color); fgets(aa,1000,fp); } fclose(fp); fprintf(stderr,"Done reading in from [%s]\n",ARGV[1]); fprintf(stderr,"Volume= %e L_box= %f\n",volume, pow(volume, THIRD)); fprintf(stderr,"Number density= %e\n",NGAL/volume); // first sort by stellar mass xtmp = vector(1,NGAL); itmp = ivector(1,NGAL); permanent_id = ivector(1,NGAL); lumshift = vector(0,NGAL-1); for(i=1;i<=NGAL;++i) { // just for kicks, give each galaxy a random luminosity lumshift[i-1] = 
pow(10.0,gasdev(&IDUM1)*0.0); xtmp[i] = -(GAL[i-1].mstellar*lumshift[i-1]); itmp[i] = i-1; } fprintf(stderr,"sorting galaxies...\n"); sort2(NGAL, xtmp, itmp); fprintf(stderr,"done sorting galaxies.\n"); // do the inverse-abundance matching density2host_halo(0.01); fprintf(stderr,"Starting inverse-sham...\n"); galden = 0; // reset the sham counters if(fluxlim) density2host_halo_zbins(-1); for(i1=1;i1<=NGAL;++i1) { i= itmp[i1]; galden += 1/GAL[i].vmax; if(fluxlim) GAL[i].mass = density2host_halo_zbins(GAL[i].redshift); else GAL[i].mass = density2host_halo_zbins(galden); GAL[i].rad = pow(3*GAL[i].mass/(4.*PI*DELTA_HALO*RHO_CRIT*OMEGA_M),THIRD); GAL[i].theta = GAL[i].rad/GAL[i].rco; GAL[i].sigmav = sqrt(BIG_G*GAL[i].mass/2.0/GAL[i].rad*(1+GAL[i].redshift)); GAL[i].psat = 0; j = i; GAL[j].x = GAL[j].rco * cos(GAL[j].ra) * cos(GAL[j].dec); GAL[j].y = GAL[j].rco * sin(GAL[j].ra) * cos(GAL[j].dec); GAL[j].z = GAL[j].rco * sin(GAL[j].dec); } fprintf(stderr,"Done inverse-sham.\n"); // assume that NGAL=NGROUP at first ngrp = NGAL; } // let's create a 3D KD tree fprintf(stderr,"Building KD-tree...\n"); kd = kd_create(3); for(i = 1; i <= NGAL; ++i){ j = i; permanent_id[j] = j; pt[0] = GAL[j].x; pt[1] = GAL[j].y; pt[2] = GAL[j].z; assert( kd_insert(kd, pt, (void*)&permanent_id[j]) == 0); } fprintf(stderr,"Done building KD-tree. 
%d\n",ngrp); // now start the group-finding iteratin for(niter=1;niter<=MAX_ITER;++niter) { t3 = omp_get_wtime(); // first, reset the psat values for(j=0;j<NGAL;++j) { GAL[j].psat = 0; GAL[j].nsat = 0; GAL[j].mtot = GAL[j].mstellar*lumshift[j]; if(GAL[j].color<0.8) GAL[j].mtot *= 1.0/pow(10.0,0.5*(1+erf((log10(GAL[j].mstellar)-WCEN_MASS)/WCEN_SIG))); else GAL[j].mtot *= pow(10.0,0.5*(1+erf((log10(GAL[j].mstellar)-WCEN_MASSR)/WCEN_SIGR))); flag[j] = 1; } // find the satellites for each halo, in order of group mass ngrp_prev = ngrp; ngrp = 0; t0 = omp_get_wtime(); for(i1=1;i1<=ngrp_prev;++i1) { i = itmp[i1]; flag[i] = 0; find_satellites(itmp[i1],kd); } find_satellites(-1,kd); t1 = omp_get_wtime(); for(i1=1;i1<=ngrp_prev;++i1) { i = itmp[i1]; if(GAL[i].psat<0.5) { ngrp++; xtmp[ngrp] = -GAL[i].mtot; itmp[ngrp] = i; if(fluxlim) xtmp[ngrp] *= fluxlim_correction(GAL[i].redshift); } } // go back and check objects are newly-exposed centrals for(j=0;j<NGAL;++j) { if(flag[j] && GAL[j].psat<0.5) { find_satellites(j,kd); ngrp++; xtmp[ngrp] = -GAL[j].mtot; itmp[ngrp] = j; if(fluxlim) xtmp[ngrp] *= fluxlim_correction(GAL[j].redshift); } } // sort groups by their total stellar mass sort2(ngrp,xtmp,itmp); // reassign the halo masses nsat_tot = galden = 0; // reset the sham counters if(fluxlim) density2host_halo_zbins(-1); for(j=1;j<=ngrp;++j) { i= itmp[j]; galden += 1/GAL[i].vmax; if(fluxlim) GAL[i].mass = density2host_halo_zbins(GAL[i].redshift); else GAL[i].mass = density2host_halo(galden); GAL[i].rad = pow(3*GAL[i].mass/(4.*PI*DELTA_HALO*RHO_CRIT*OMEGA_M),THIRD); GAL[i].theta = GAL[i].rad/GAL[i].rco; GAL[i].sigmav = sqrt(BIG_G*GAL[i].mass/2.0/GAL[i].rad*(1+GAL[i].redshift)); nsat_tot += GAL[i].nsat; } t4 = omp_get_wtime(); //for the satellites, set their host halo mass for(j=0;j<NGAL;++j) if(GAL[j].psat>0.5) GAL[j].mass = GAL[GAL[j].igrp].mass; fprintf(stderr,"iter %d ngroups=%d fsat=%f (kdtime=%.2f %.2f)\n",niter,ngrp,nsat_tot/NGAL, t1-t0,t4-t3); } /* Output to disk the final 
results */ if(OUTPUT) { for(i=0;i<NGAL;++i) { printf("%d %f %f %f %e %e %f %e %e %e %d\n", i, GAL[i].ra*180/PI, GAL[i].dec*180/PI,GAL[i].redshift, GAL[i].mstellar, GAL[i].vmax, GAL[i].psat, GAL[i].mass, GAL[i].nsat, GAL[i].mtot, GAL[i].igrp); } fflush(stdout); } /* let's free up the memory of the kdtree */ kd_free(kd); } /* Distance-redshift relation */ float func_dr1(float z) { return pow(OMEGA_M*(1+z)*(1+z)*(1+z)+(1-OMEGA_M),-0.5); } float distance_redshift(float z) { float x; if(z<=0)return 0; x= c_on_H0*qromo(func_dr1,0.0,z,midpnt); return x; } /* Here is the main code to find satellites for a given central galaxy */ void find_satellites(int ii, void *kd) { int i,nThreads; static int icen_set[24]; static int cnt=0; if(GAL[ii].psat>0.5)return; #pragma omp parallel shared(nThreads) { #pragma omp single { nThreads = omp_get_num_threads(); } } if(ii>=0) { icen_set[cnt] = ii; cnt++; if(cnt<nThreads)return; } //printf("resetting count: %d %d\n",cnt,nThreads); //for(i=0;i<cnt;++i) // printf("cnt[%d]= %d\n",i,icen_set[i]); cnt = 0; #pragma omp parallel { int j, k, thisTask, icen; float dx, dy, dz, theta, prob_ang, vol_corr, prob_rad, grp_lum, p0, range; float cenDist, bprob; void *set; int *pch; double cen[3]; double sat[3]; int local_cnt = 0; thisTask = omp_get_thread_num(); icen = icen_set[thisTask]; //printf("ICEN %d %d\n",thisTask,icen); //fflush(stdout); // check if this galaxy has already been given to a group if(GAL[icen].psat>0.5)goto END_SAT; cen[0] = GAL[icen].x; cen[1] = GAL[icen].y; cen[2] = GAL[icen].z; range = 4*GAL[icen].sigmav/100.0*(1+GAL[icen].redshift)/ sqrt(OMEGA_M*pow(1+GAL[icen].redshift,3.0) + 1-OMEGA_M); set = kd_nearest_range(kd, cen, range); // Set now contains the nearest neighbours within a distance range. Grab their info. 
while( !kd_res_end(set)) { local_cnt++; // Get index value of the current neighbor pch = (int*)kd_res_item(set, sat); j = *pch; kd_res_next(set); /* if(thisTask==0){ printf("JJ %d %d %f %f %f %f\n",j,icen,GAL[icen].x, GAL[j].x, range,sat[0]); fflush(stdout); } */ // Skip if target galaxy is the same as the central (obviously). if(j == icen)continue; // skip if the object is more massive than the icen if(GAL[j].mstellar>=GAL[icen].mstellar)continue; // Skip if already assigned to a central. if(GAL[j].psat>0.5)continue; // check if the galaxy is outside the angular radius of the halo dz = fabs(GAL[icen].redshift - GAL[j].redshift)*SPEED_OF_LIGHT; theta = angular_separation(GAL[icen].ra,GAL[icen].dec,GAL[j].ra,GAL[j].dec); if(theta > GAL[icen].theta){ continue; } // Now determine the probability of being a satellite //(both projected onto the sky, and along the line of sight). prob_ang = radial_probability(GAL[icen].mass, theta, GAL[icen].rad, GAL[icen].theta); prob_rad = exp(-dz*dz/(2*GAL[icen].sigmav*GAL[icen].sigmav)) *SPEED_OF_LIGHT/(RT2PI*GAL[icen].sigmav); // set the background level if(GAL[j].color>0.8) bprob = BPROB_RED + (log10(GAL[j].mstellar)-9.5)*BPROB_XRED; else bprob = BPROB_BLUE + (log10(GAL[j].mstellar)-9.5)*BPROB_XBLUE; // combine them into the total probability p0 = (1 - 1/(1 + prob_ang * prob_rad / bprob)); if(p0 < 0){ printf("ZERO %e\n",p0); p0 = 0; } if(p0<0.5)continue; if(GAL[j].igrp)continue; // this is considered a member of the group GAL[j].psat = p0; GAL[j].igrp = icen; GAL[icen].mtot += GAL[j].mstellar; GAL[icen].nsat++; } //exit(0); // Correct for boundary conditions dz = SPEED_OF_LIGHT* fabs(GAL[icen].redshift - MINREDSHIFT); vol_corr = 1-(0.5*erfc(dz/(ROOT2*GAL[icen].sigmav))); GAL[icen].nsat /= vol_corr; GAL[icen].mtot /= vol_corr; dz = SPEED_OF_LIGHT* fabs(GAL[icen].redshift - MAXREDSHIFT); vol_corr = 1-(0.5*erfc(dz/(ROOT2*GAL[j].sigmav))); GAL[icen].nsat /= vol_corr; GAL[icen].mtot /= vol_corr; END_SAT: //printf("NCNT %d %d 
%d\n",thisTask,icen,local_cnt); #pragma omp barrier dz =0; } } /* angular separation between two points in ra/dec */ float angular_separation(float a1, float d1, float a2, float d2) { float cd1,cd2,sd1,sd2,ca1a2,sa1a2; return atan((sqrt(cos(d2)*cos(d2)*sin(a2-a1)*sin(a2-a1) + pow(cos(d1)*sin(d2) - sin(d1)*cos(d2)*cos(a2-a1),2.0)))/ (sin(d1)*sin(d2) + cos(d1)*cos(d2)*cos(a2-a1))); } /* Probability assuming a projected NFW profile */ float radial_probability(float mass, float dr, float rad, float ang_rad) { float c, x, rs, delta, f; dr = dr*rad/ang_rad; c = 10.0*pow(mass/1.0E+14,-0.11); rs = rad/c; x = dr/rs; if(x<1) f = 1/(x*x-1)*(1-log((1+sqrt(1-x*x))/x)/(sqrt(1-x*x))); if(x==1) f = 1.0/3.0; if(x>1) f = 1/(x*x-1)*(1-atan(sqrt(x*x-1))/sqrt(x*x-1)); delta = DELTA_HALO/3.0*c*c*c/(log(1+c)-c/(1+c)); return 1.0/c_on_H0*2*rs*delta*f; } /* This is calibrated from the MXXL BGS mock, * from ratio of luminosity density in redshift * bins relative to total 1/Vmax-weighted luminosity * density. (SLightly different than Yang et al). * * luminosity_correction.py */ float fluxlim_correction(float z) { return 1; return pow(10.0,pow(z/0.4,4.0)*0.1); }
gemm.c
#include "gemm.h"
#include "utils.h"
#include "cuda.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>

/* C += B for rows where the packed sign matrix A is nonzero, C -= B where
 * it is zero (binary-weight GEMM). */
void gemm_bin(int M, int N, int K, float ALPHA,
        char  *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i,j,k;
    for(i = 0; i < M; ++i){
        for(k = 0; k < K; ++k){
            char A_PART = A[i*lda+k];
            if(A_PART){
                for(j = 0; j < N; ++j){
                    C[i*ldc+j] += B[k*ldb+j];
                }
            } else {
                for(j = 0; j < N; ++j){
                    C[i*ldc+j] -= B[k*ldb+j];
                }
            }
        }
    }
}

/* Allocate a rows*cols matrix filled with uniform [0,1) values. */
float *random_matrix(int rows, int cols)
{
    int i;
    float *m = calloc(rows*cols, sizeof(float));
    for(i = 0; i < rows*cols; ++i){
        m[i] = (float)rand()/RAND_MAX;
    }
    return m;
}

/* Time 10 CPU gemm calls on random operands of the given shape. */
void time_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);
    int i;
    clock_t start = clock(), end;
    for(i = 0; i<10; ++i){
        gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    }
    end = clock();
    /* BUG FIX: (end-start)/CLOCKS_PER_SEC is in seconds, not milliseconds;
     * label matches the GPU timing routine below. */
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}

/* Public entry point: C = ALPHA*op(A)*op(B) + BETA*C on the CPU. */
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    gemm_cpu( TA,  TB,  M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
}

/* C += ALPHA * A * B (no transposes); ikj order for cache-friendly B access. */
void gemm_nn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i,j,k;
    #pragma omp parallel for
    for(i = 0; i < M; ++i){
        for(k = 0; k < K; ++k){
            register float A_PART = ALPHA*A[i*lda+k];
            for(j = 0; j < N; ++j){
                C[i*ldc+j] += A_PART*B[k*ldb+j];
            }
        }
    }
}

/* C += ALPHA * A * B^T. */
void gemm_nt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i,j,k;
    #pragma omp parallel for
    for(i = 0; i < M; ++i){
        for(j = 0; j < N; ++j){
            register float sum = 0;
            for(k = 0; k < K; ++k){
                sum += ALPHA*A[i*lda+k]*B[j*ldb + k];
            }
            C[i*ldc+j] += sum;
        }
    }
}

/* C += ALPHA * A^T * B. */
void gemm_tn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i,j,k;
    #pragma omp parallel for
    for(i = 0; i < M; ++i){
        for(k = 0; k < K; ++k){
            register float A_PART = ALPHA*A[k*lda+i];
            for(j = 0; j < N; ++j){
                C[i*ldc+j] += A_PART*B[k*ldb+j];
            }
        }
    }
}

/* C += ALPHA * A^T * B^T. */
void gemm_tt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i,j,k;
    #pragma omp parallel for
    for(i = 0; i < M; ++i){
        for(j = 0; j < N; ++j){
            register float sum = 0;
            for(k = 0; k < K; ++k){
                sum += ALPHA*A[i+k*lda]*B[k+j*ldb];
            }
            C[i*ldc+j] += sum;
        }
    }
}

/* Scale C by BETA, then dispatch to the variant matching (TA, TB). */
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    //printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc);
    int i, j;
    for(i = 0; i < M; ++i){
        for(j = 0; j < N; ++j){
            C[i*ldc + j] *= BETA;
        }
    }
    if(!TA && !TB)
        gemm_nn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
    else if(TA && !TB)
        gemm_tn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
    else if(!TA && TB)
        gemm_nt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
    else
        gemm_tt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
}

#ifdef DNETGPU

#include <math.h>

/* GPU gemm via cuBLAS; operands are swapped to map row-major data onto
 * cuBLAS's column-major convention. */
void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A_gpu, int lda,
        float *B_gpu, int ldb,
        float BETA,
        float *C_gpu, int ldc)
{
    cublasHandle_t handle = blas_handle();
    /* NOTE(review): cublasSgemm returns cublasStatus_t, not cudaError_t;
     * this relies on check_error() accepting the enum value -- confirm. */
    cudaError_t status = cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N),
            (TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc);
    check_error(status);
}

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

/* Time 32 GPU gemm calls on random host operands of the given shape. */
void time_gpu_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);
    int i;
    clock_t start = clock(), end;
    for(i = 0; i<32; ++i){
        gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    }
    end = clock();
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}

/* Benchmark GPU gemm on device-resident data and report GFLOPS. */
void time_gpu(int TA, int TB, int m, int k, int n)
{
    int iter = 10;
    float *a = random_matrix(m,k);
    float *b = random_matrix(k,n);

    int lda = (!TA)?k:m;
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);

    float *a_cl = cuda_make_array(a, m*k);
    float *b_cl = cuda_make_array(b, k*n);
    float *c_cl = cuda_make_array(c, m*n);

    int i;
    clock_t start = clock(), end;
    for(i = 0; i<iter; ++i){
        gemm_gpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n);
        cudaThreadSynchronize();
    }
    double flop = ((double)m)*n*(2.*k + 2.)*iter;
    double gflop = flop/pow(10., 9);
    end = clock();
    double seconds = sec(end-start);
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds);
    cuda_free(a_cl);
    cuda_free(b_cl);
    cuda_free(c_cl);
    free(a);
    free(b);
    free(c);
}

/* Compare GPU vs CPU gemm results and print the mean squared error. */
void test_gpu_accuracy(int TA, int TB, int m, int k, int n)
{
    srand(0);
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);
    float *c_gpu = random_matrix(m,n);
    memset(c, 0, m*n*sizeof(float));
    memset(c_gpu, 0, m*n*sizeof(float));
    int i;
    //pm(m,k,b);
    gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n);
    //printf("GPU\n");
    //pm(m, n, c_gpu);

    gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    //printf("\n\nCPU\n");
    //pm(m, n, c);
    double sse = 0;
    for(i = 0; i < m*n; ++i) {
        //printf("%f %f\n", c[i], c_gpu[i]);
        sse += pow(c[i]-c_gpu[i], 2);
    }
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n));
    free(a);
    free(b);
    free(c);
    free(c_gpu);
}

/* Run a fixed set of GPU timing cases (accuracy checks kept commented out). */
int test_gpu_blas()
{
    /*
       test_gpu_accuracy(0,0,10,576,75);

       test_gpu_accuracy(0,0,17,10,10);
       test_gpu_accuracy(1,0,17,10,10);
       test_gpu_accuracy(0,1,17,10,10);
       test_gpu_accuracy(1,1,17,10,10);

       test_gpu_accuracy(0,0,1000,10,100);
       test_gpu_accuracy(1,0,1000,10,100);
       test_gpu_accuracy(0,1,1000,10,100);
       test_gpu_accuracy(1,1,1000,10,100);

       test_gpu_accuracy(0,0,10,10,10);

       time_gpu(0,0,64,2916,363);
       time_gpu(0,0,64,2916,363);
       time_gpu(0,0,64,2916,363);
       time_gpu(0,0,192,729,1600);
       time_gpu(0,0,384,196,1728);
       time_gpu(0,0,256,196,3456);
       time_gpu(0,0,256,196,2304);
       time_gpu(0,0,128,4096,12544);
       time_gpu(0,0,128,4096,4096);
     */
    time_gpu(0,0,64,75,12544);
    time_gpu(0,0,64,75,12544);
    time_gpu(0,0,64,75,12544);
    time_gpu(0,0,64,576,12544);
    time_gpu(0,0,256,2304,784);
    time_gpu(1,1,2304,256,784);
    time_gpu(0,0,512,4608,196);
    time_gpu(1,1,4608,512,196);

    return 0;
}
#endif
lis_matrix_msr.c
/* Copyright (C) 2002-2012 The SSI Project. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
#ifdef HAVE_CONFIG_H
#include "lis_config.h"
#else
#ifdef HAVE_CONFIG_WIN32_H
#include "lis_config_win32.h"
#endif
#endif

#include <stdio.h>
#include <stdlib.h>
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif
#include <string.h>
#include <stdarg.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef USE_MPI
#include <mpi.h>
#endif
#include "lislib.h"

/************************************************
 * MSR (Modified Sparse Row) storage routines.
 *
 * function                      | SOM |
 *-----------------------------+-----+
 * lis_matrix_set               | o   |
 * lis_matrix_setDLU            | o   |
 * lis_matrix_malloc            | o   |
 * lis_matrix_elements_copy     | o   |
 * lis_matrix_transpose         | o   |
 * lis_matrix_split             | o   |
 * lis_matrix_merge             | o   |
 *-----------------------------+-----+-----+
 * function                      |merge|split|
 *-----------------------------+-----+-----|
 * lis_matrix_convert           | o   |     |
 * lis_matrix_copy              | o   | o   |
 * lis_matrix_get_diagonal      | o   | o   |
 * lis_matrix_scaling           | o   | o   |
 * lis_matrix_scaling_symm      | o   | o   |
 * lis_matrix_normf             | o   | o   |
 * lis_matrix_sort              | o   | o   |
 * lis_matrix_solve             | xxx | o   |
 * lis_matrix_solvet            | xxx | o   |
 ************************************************/

#undef __FUNC__
#define __FUNC__ "lis_matrix_set_msr"
/* Attach caller-owned MSR arrays (index, value) to matrix A.
 * No data is copied (A->is_copy = LIS_FALSE); the negative status
 * marks the matrix as "set but not yet assembled".
 * Returns LIS_SUCCESS, or an error from lis_matrix_check. */
LIS_INT lis_matrix_set_msr(LIS_INT nnz, LIS_INT ndz, LIS_INT *index, LIS_SCALAR *value, LIS_MATRIX A)
{
	LIS_INT err;

	LIS_DEBUG_FUNC_IN;

#if 0
	err = lis_matrix_check(A,LIS_MATRIX_CHECK_SET);
	if( err ) return err;
#else
	/* already-assembled matrices are accepted as a no-op */
	if(lis_matrix_is_assembled(A)) return LIS_SUCCESS;
	else
	{
		err = lis_matrix_check(A,LIS_MATRIX_CHECK_SET);
		if( err ) return err;
	}
#endif

	A->index       = index;
	A->value       = value;
	A->is_copy     = LIS_FALSE;
	A->status      = -LIS_MATRIX_MSR;
	A->nnz         = nnz;
	A->ndz         = ndz;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_setDLU_msr"
/* Attach pre-split D/L/U MSR data to matrix A.
 * diag/lindex/lvalue/uindex/uvalue become owned by A's D, L and U parts;
 * the diagonal storage allocated by lis_matrix_diag_create is released
 * and replaced by the caller's diag array. */
LIS_INT lis_matrix_setDLU_msr(LIS_INT lnnz, LIS_INT unnz, LIS_INT lndz, LIS_INT undz, LIS_SCALAR *diag, LIS_INT *lindex, LIS_SCALAR *lvalue, LIS_INT *uindex, LIS_SCALAR *uvalue, LIS_MATRIX A)
{
	LIS_INT	err;
	LIS_MATRIX_DIAG	D;

	LIS_DEBUG_FUNC_IN;

#if 0
	err = lis_matrix_check(A,LIS_MATRIX_CHECK_SET);
	if( err ) return err;
#else
	if(lis_matrix_is_assembled(A)) return LIS_SUCCESS;
	else
	{
		err = lis_matrix_check(A,LIS_MATRIX_CHECK_SET);
		if( err ) return err;
	}
#endif

	A->L = (LIS_MATRIX_CORE)lis_calloc(sizeof(struct LIS_MATRIX_CORE_STRUCT),"lis_matrix_setDLU_msr::A->L");
	if( A->L==NULL )
	{
		LIS_SETERR_MEM(sizeof(struct LIS_MATRIX_CORE_STRUCT));
		return LIS_OUT_OF_MEMORY;
	}
	A->U = (LIS_MATRIX_CORE)lis_calloc(sizeof(struct LIS_MATRIX_CORE_STRUCT),"lis_matrix_setDLU_msr::A->U");
	if( A->U==NULL )
	{
		LIS_SETERR_MEM(sizeof(struct LIS_MATRIX_CORE_STRUCT));
		lis_matrix_DLU_destroy(A);
		return LIS_OUT_OF_MEMORY;
	}
	err = lis_matrix_diag_create(A->n,0,A->comm,&D);
	if( err )
	{
		lis_matrix_DLU_destroy(A);
		return err;
	}
	/* replace the freshly created diagonal storage with the caller's array */
	lis_free(D->value);
	D->value = diag;

	A->D          = D;
	A->L->nnz     = lnnz;
	A->L->ndz     = lndz;
	A->L->index   = lindex;
	A->L->value   = lvalue;
	A->U->nnz     = unnz;
	A->U->ndz     = undz;
	A->U->index   = uindex;
	A->U->value   = uvalue;
	A->is_copy    = LIS_FALSE;
	A->status     = -LIS_MATRIX_MSR;
	A->is_splited = LIS_TRUE;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_malloc_msr"
/* Allocate MSR index/value arrays of length nnz+ndz+1
 * (MSR reserves n+1 leading slots for row pointers; ndz counts
 * zero diagonal entries). Both outputs are NULL on failure. */
LIS_INT lis_matrix_malloc_msr(LIS_INT n, LIS_INT nnz, LIS_INT ndz, LIS_INT **index, LIS_SCALAR **value)
{
	LIS_DEBUG_FUNC_IN;

	*index   = NULL;
	*value   = NULL;
	*index = (LIS_INT *)lis_malloc( (nnz+ndz+1)*sizeof(LIS_INT),"lis_matrix_malloc_msr::index" );
	if( *index==NULL )
	{
		LIS_SETERR_MEM((nnz+ndz+1)*sizeof(LIS_INT));
		lis_free2(2,*index,*value);
		return LIS_OUT_OF_MEMORY;
	}
	*value = (LIS_SCALAR *)lis_malloc( (nnz+ndz+1)*sizeof(LIS_SCALAR),"lis_matrix_malloc_msr::value" );
	if( *value==NULL )
	{
		LIS_SETERR_MEM((nnz+ndz+1)*sizeof(LIS_SCALAR));
		lis_free2(2,*index,*value);
		return LIS_OUT_OF_MEMORY;
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_elements_copy_msr"
/* Deep-copy MSR arrays (index,value) into (o_index,o_value):
 * first the n+1 row-pointer slots, then the off-diagonal entries. */
LIS_INT lis_matrix_elements_copy_msr(LIS_INT n, LIS_INT *index, LIS_SCALAR *value, LIS_INT *o_index, LIS_SCALAR *o_value)
{
	LIS_INT i,j;

	LIS_DEBUG_FUNC_IN;
#ifdef
_OPENMP #pragma omp parallel private(i,j) #endif { #ifdef _OPENMP #pragma omp for #endif for(i=0;i<n+1;i++) { o_index[i] = index[i]; o_value[i] = value[i]; } #ifdef _OPENMP #pragma omp for #endif for(i=0;i<n;i++) { for(j=index[i];j<index[i+1];j++) { o_value[j] = value[j]; o_index[j] = index[j]; } } } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_copy_msr" LIS_INT lis_matrix_copy_msr(LIS_MATRIX Ain, LIS_MATRIX Aout) { LIS_INT err; LIS_INT i,n,nnz,ndz,lnnz,unnz,lndz,undz; LIS_INT *index; LIS_INT *lindex; LIS_INT *uindex; LIS_SCALAR *value,*lvalue,*uvalue,*diag; LIS_DEBUG_FUNC_IN; n = Ain->n; if( Ain->is_splited ) { lnnz = Ain->L->nnz; unnz = Ain->U->nnz; lndz = Ain->L->ndz; undz = Ain->U->ndz; lindex = NULL; uindex = NULL; diag = NULL; err = lis_matrix_malloc_msr(n,lnnz,lndz,&lindex,&lvalue); if( err ) { return err; } err = lis_matrix_malloc_msr(n,unnz,undz,&uindex,&uvalue); if( err ) { lis_free2(5,diag,uindex,lindex,uvalue,lvalue); return err; } diag = (LIS_SCALAR *)lis_malloc(n*sizeof(LIS_SCALAR),"lis_matrix_copy_msr::diag"); if( diag==NULL ) { lis_free2(5,diag,uindex,lindex,uvalue,lvalue); return err; } #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0;i<n;i++) { diag[i] = Ain->D->value[i]; } lis_matrix_elements_copy_msr(n,Ain->L->index,Ain->L->value,lindex,lvalue); lis_matrix_elements_copy_msr(n,Ain->U->index,Ain->U->value,uindex,uvalue); err = lis_matrix_setDLU_msr(lnnz,unnz,lndz,undz,diag,lindex,lvalue,uindex,uvalue,Aout); if( err ) { lis_free2(5,diag,uindex,lindex,uvalue,lvalue); return err; } } if( !Ain->is_splited || (Ain->is_splited && Ain->is_save) ) { index = NULL; value = NULL; nnz = Ain->nnz; ndz = Ain->ndz; err = lis_matrix_malloc_msr(n,nnz,ndz,&index,&value); if( err ) { return err; } lis_matrix_elements_copy_msr(n,Ain->index,Ain->value,index,value); err = lis_matrix_set_msr(nnz,ndz,index,value,Aout); if( err ) { lis_free2(2,index,value); return err; } } err = lis_matrix_assemble(Aout); if( err 
) { lis_matrix_storage_destroy(Aout); return err; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_get_diagonal_msr" LIS_INT lis_matrix_get_diagonal_msr(LIS_MATRIX A, LIS_SCALAR d[]) { LIS_INT i; LIS_INT n; LIS_DEBUG_FUNC_IN; n = A->n; if( A->is_splited ) { #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<n; i++) { d[i] = A->D->value[i]; } } else { #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<n; i++) { d[i] = A->value[i]; } } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_scaling_msr" LIS_INT lis_matrix_scaling_msr(LIS_MATRIX A, LIS_SCALAR d[]) { LIS_INT i,j; LIS_INT n; LIS_DEBUG_FUNC_IN; n = A->n; if( A->is_splited ) { #ifdef _OPENMP #pragma omp parallel for private(i,j) #endif for(i=0; i<n; i++) { A->D->value[i] = 1.0; for(j=A->L->index[i];j<A->L->index[i+1];j++) { A->L->value[j] *= d[i]; } for(j=A->U->index[i];j<A->U->index[i+1];j++) { A->U->value[j] *= d[i]; } } } else { #ifdef _OPENMP #pragma omp parallel for private(i,j) #endif for(i=0; i<n; i++) { A->value[i] = 1.0; for(j=A->index[i];j<A->index[i+1];j++) { A->value[j] *= d[i]; } } } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_scaling_symm_msr" LIS_INT lis_matrix_scaling_symm_msr(LIS_MATRIX A, LIS_SCALAR d[]) { LIS_INT i,j; LIS_INT n; LIS_DEBUG_FUNC_IN; n = A->n; if( A->is_splited ) { #ifdef _OPENMP #pragma omp parallel for private(i,j) #endif for(i=0; i<n; i++) { A->D->value[i] = 1.0; for(j=A->L->index[i];j<A->L->index[i+1];j++) { A->L->value[j] = A->L->value[j]*d[i]*d[A->L->index[j]]; } for(j=A->U->index[i];j<A->U->index[i+1];j++) { A->U->value[j] = A->U->value[j]*d[i]*d[A->U->index[j]]; } } } else { #ifdef _OPENMP #pragma omp parallel for private(i,j) #endif for(i=0; i<n; i++) { A->value[i] = 1.0; for(j=A->index[i];j<A->index[i+1];j++) { A->value[j] = A->value[j]*d[i]*d[A->index[j]]; } } } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef 
__FUNC__
#define __FUNC__ "lis_matrix_normf_msr"
/* Frobenius norm of A: sqrt of the sum of squares of all stored entries. */
LIS_INT lis_matrix_normf_msr(LIS_MATRIX A, LIS_SCALAR *nrm)
{
	LIS_INT i,j;
	LIS_INT n;
	LIS_SCALAR sum;

	LIS_DEBUG_FUNC_IN;

	n = A->n;
	sum = (LIS_SCALAR)0;
	if( A->is_splited )
	{
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum) private(i,j)
#endif
		for(i=0; i<n; i++)
		{
			sum += A->D->value[i]*A->D->value[i];
			for(j=A->L->index[i];j<A->L->index[i+1];j++)
			{
				sum += A->L->value[j]*A->L->value[j];
			}
			for(j=A->U->index[i];j<A->U->index[i+1];j++)
			{
				sum += A->U->value[j]*A->U->value[j];
			}
		}
	}
	else
	{
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum) private(i,j)
#endif
		for(i=0; i<n; i++)
		{
			/* value[i] is the diagonal; the inner loop covers off-diagonals */
			sum += A->value[i]*A->value[i];
			for(j=A->index[i];j<A->index[i+1];j++)
			{
				sum += A->value[j]*A->value[j];
			}
		}
	}
	*nrm = sqrt(sum);
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_transpose_msr"
/* NOTE: transpose is not implemented for MSR — the conversion call is
 * commented out and only the type/status of *Aout is tagged. */
LIS_INT lis_matrix_transpose_msr(LIS_MATRIX Ain, LIS_MATRIX *Aout)
{
	LIS_DEBUG_FUNC_IN;

/*	err = lis_matrix_convert_msr2ccs(Ain,Aout);*/
	(*Aout)->matrix_type = LIS_MATRIX_MSR;
	(*Aout)->status      = LIS_MATRIX_MSR;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_split_msr"
/* Split A into strictly-lower L, strictly-upper U and diagonal D parts,
 * each stored in MSR form. The OpenMP path first counts per-row entries
 * (liw/uiw), prefix-sums them into row pointers, then fills in parallel;
 * the serial path counts and fills in a single pass. MSR convention: the
 * first n+1 slots of index[] are row pointers, so counts start at n+1. */
LIS_INT lis_matrix_split_msr(LIS_MATRIX A)
{
	LIS_INT i,j,n;
	LIS_INT lnnz,unnz;
	LIS_INT lndz,undz;
	LIS_INT err;
	LIS_INT *lindex,*uindex;
	LIS_SCALAR *lvalue,*uvalue;
#ifdef _OPENMP
	LIS_INT kl,ku;
	LIS_INT *liw,*uiw;
#endif
	LIS_MATRIX_DIAG	D;

	LIS_DEBUG_FUNC_IN;

	n        = A->n;
	lnnz     = 0;
	unnz     = 0;
	lndz     = n;
	undz     = n;
	D        = NULL;
	lindex   = NULL;
	lvalue   = NULL;
	uindex   = NULL;
	uvalue   = NULL;

#ifdef _OPENMP
	liw = (LIS_INT *)lis_malloc((n+1)*sizeof(LIS_INT),"lis_matrix_split_msr::liw");
	if( liw==NULL )
	{
		LIS_SETERR_MEM((n+1)*sizeof(LIS_INT));
		return LIS_OUT_OF_MEMORY;
	}
	uiw = (LIS_INT *)lis_malloc((n+1)*sizeof(LIS_INT),"lis_matrix_split_msr::uiw");
	if( uiw==NULL )
	{
		LIS_SETERR_MEM((n+1)*sizeof(LIS_INT));
		lis_free(liw);
		return LIS_OUT_OF_MEMORY;
	}
	#pragma omp parallel for private(i)
	for(i=0;i<n+1;i++)
	{
		liw[i] = 0;
		uiw[i] = 0;
	}
	/* count strictly-lower / strictly-upper entries per row */
	#pragma omp parallel for private(i,j)
	for(i=0;i<n;i++)
	{
		for(j=A->index[i];j<A->index[i+1];j++)
		{
			if( A->index[j]<i )
			{
				liw[i+1]++;
			}
			else if( A->index[j]>i )
			{
				uiw[i+1]++;
			}
		}
	}
	/* prefix sum starting at n+1 (MSR row-pointer offset) */
	liw[0] = n+1;
	uiw[0] = n+1;
	for(i=0;i<n;i++)
	{
		liw[i+1] += liw[i];
		uiw[i+1] += uiw[i];
	}
	lnnz = liw[n];
	unnz = uiw[n];
#else
	for(i=0;i<n;i++)
	{
		for(j=A->index[i];j<A->index[i+1];j++)
		{
			if( A->index[j]<i )
			{
				lnnz++;
			}
			else if( A->index[j]>i )
			{
				unnz++;
			}
		}
	}
#endif

	err = lis_matrix_LU_create(A);
	if( err )
	{
		return err;
	}
	err = lis_matrix_malloc_msr(n,lnnz,lndz,&lindex,&lvalue);
	if( err )
	{
		return err;
	}
	err = lis_matrix_malloc_msr(n,unnz,undz,&uindex,&uvalue);
	if( err )
	{
		lis_free2(4,lindex,lvalue,uindex,uvalue);
		return err;
	}
	err = lis_matrix_diag_duplicateM(A,&D);
	if( err )
	{
		lis_free2(4,lindex,lvalue,uindex,uvalue);
		return err;
	}

#ifdef _OPENMP
	#pragma omp parallel for private(i)
	for(i=0;i<n+1;i++)
	{
		lindex[i] = liw[i];
		uindex[i] = uiw[i];
	}
	#pragma omp parallel for private(i,j,kl,ku)
	for(i=0;i<n;i++)
	{
		kl = lindex[i];
		ku = uindex[i];
		D->value[i] = A->value[i];
		for(j=A->index[i];j<A->index[i+1];j++)
		{
			if( A->index[j]<i )
			{
				lindex[kl] = A->index[j];
				lvalue[kl] = A->value[j];
				kl++;
			}
			else if( A->index[j]>i )
			{
				uindex[ku] = A->index[j];
				uvalue[ku] = A->value[j];
				ku++;
			}
		}
	}
	lis_free2(2,liw,uiw);
#else
	/* serial fill: lnnz/unnz double as running write cursors */
	lnnz = n+1;
	unnz = n+1;
	lindex[0] = n+1;
	uindex[0] = n+1;
	for(i=0;i<n;i++)
	{
		D->value[i] = A->value[i];
		for(j=A->index[i];j<A->index[i+1];j++)
		{
			if( A->index[j]<i )
			{
				lindex[lnnz] = A->index[j];
				lvalue[lnnz] = A->value[j];
				lnnz++;
			}
			else if( A->index[j]>i )
			{
				uindex[unnz] = A->index[j];
				uvalue[unnz] = A->value[j];
				unnz++;
			}
		}
		lindex[i+1] = lnnz;
		uindex[i+1] = unnz;
	}
#endif
	/* cursors include the n+1 pointer slots; subtract them for true nnz */
	A->L->nnz     = lnnz - (n+1);
	A->L->ndz     = lndz;
	A->L->index   = lindex;
	A->L->value   = lvalue;
	A->U->nnz     = unnz - (n+1);
	A->U->ndz     = undz;
	A->U->index   = uindex;
	A->U->value   = uvalue;
	A->D          = D;
	A->is_splited = LIS_TRUE;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_merge_msr"
LIS_INT
lis_matrix_merge_msr(LIS_MATRIX A)
/* Merge the split D/L/U parts of A back into single MSR arrays,
 * attached to A->index / A->value. For LIS_MATRIX_MSR the lower part is
 * emitted before the upper part per row; otherwise upper first. */
{
	LIS_INT i,j,n,is;
	LIS_INT nnz,ndz;
	LIS_INT err;
	LIS_INT *index;
	LIS_SCALAR *value;

	LIS_DEBUG_FUNC_IN;

	n       = A->n;
	nnz     = 0;
	ndz     = 0;		/* NOTE(review): merged ndz is always 0 here — verify intent */
	is      = A->is;
	index   = NULL;
	value   = NULL;
	nnz     = A->L->nnz + A->U->nnz + n;
	err = lis_matrix_malloc_msr(n,nnz,ndz,&index,&value);
	if( err )
	{
		return err;
	}

	/* nnz becomes the write cursor, starting after the n+1 pointer slots */
	nnz      = n+1;
	index[0] = n+1;
	if( A->matrix_type==LIS_MATRIX_MSR )
	{
		for(i=0;i<n;i++)
		{
			value[i] = A->D->value[i];
			for(j=A->L->index[i];j<A->L->index[i+1];j++)
			{
				index[nnz] = A->L->index[j];
				value[nnz] = A->L->value[j];
				nnz++;
			}
			for(j=A->U->index[i];j<A->U->index[i+1];j++)
			{
				index[nnz] = A->U->index[j];
				value[nnz] = A->U->value[j];
				nnz++;
			}
			index[i+1] = nnz;
		}
	}
	else
	{
		for(i=0;i<n;i++)
		{
			value[i] = A->D->value[i];
			for(j=A->U->index[i];j<A->U->index[i+1];j++)
			{
				index[nnz] = A->U->index[j];
				value[nnz] = A->U->value[j];
				nnz++;
			}
			for(j=A->L->index[i];j<A->L->index[i+1];j++)
			{
				index[nnz] = A->L->index[j];
				value[nnz] = A->L->value[j];
				nnz++;
			}
			index[i+1] = nnz;
		}
	}
	A->nnz   = nnz;
	A->ndz   = ndz;
	A->value = value;
	A->index = index;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_sort_msr"
/* Sort the column indices within each row.
 * NOTE(review): this reads A->ptr / A->L->ptr for row bounds, but the MSR
 * routines above store row bounds in index[0..n] — verify that ptr is
 * populated for MSR matrices before relying on this path. */
LIS_INT lis_matrix_sort_msr(LIS_MATRIX A)
{
	LIS_INT i,n;

	LIS_DEBUG_FUNC_IN;

	if( !A->is_sorted )
	{
		n = A->n;
		if( A->is_splited )
		{
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
			for(i=0;i<n;i++)
			{
				lis_sort_id(A->L->ptr[i],A->L->ptr[i+1]-1,A->L->index,A->L->value);
				lis_sort_id(A->U->ptr[i],A->U->ptr[i+1]-1,A->U->index,A->U->value);
			}
		}
		else
		{
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
			for(i=0;i<n;i++)
			{
				lis_sort_id(A->ptr[i],A->ptr[i+1]-1,A->index,A->value);
			}
		}
		A->is_sorted = LIS_TRUE;
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_solve_msr"
/* Triangular solve with the split matrix: X = op(A)^{-1} B.
 * flag selects lower, upper, or SSOR sweep; A->WD holds the
 * (working) diagonal factors applied multiplicatively. */
LIS_INT lis_matrix_solve_msr(LIS_MATRIX A, LIS_VECTOR B, LIS_VECTOR X, LIS_INT flag)
{
	LIS_INT i,j,n;
	LIS_SCALAR t;
	LIS_SCALAR *b,*x;

	LIS_DEBUG_FUNC_IN;

	n  = A->n;
	b  = B->value;
	x  = X->value;

	switch(flag)
	{
	case LIS_MATRIX_LOWER:
		/* forward substitution */
		for(i=0;i<n;i++)
		{
			t = b[i];
			for(j=A->L->index[i];j<A->L->index[i+1];j++)
			{
				t -= A->L->value[j] * x[A->L->index[j]];
			}
			x[i] = t * A->WD->value[i];
		}
		break;
	case LIS_MATRIX_UPPER:
		/* backward substitution */
		for(i=n-1;i>=0;i--)
		{
			t = b[i];
			for(j=A->U->index[i];j<A->U->index[i+1];j++)
			{
				t -= A->U->value[j] * x[A->U->index[j]];
			}
			x[i] = t * A->WD->value[i];
		}
		break;
	case LIS_MATRIX_SSOR:
		for(i=0;i<n;i++)
		{
			t = b[i];
			for(j=A->L->index[i];j<A->L->index[i+1];j++)
			{
				t -= A->L->value[j] * x[A->L->index[j]];
			}
			x[i] = t * A->WD->value[i];
		}
		for(i=n-1;i>=0;i--)
		{
			t = 0.0;
			for(j=A->U->index[i];j<A->U->index[i+1];j++)
			{
				/* skip halo columns beyond the local range */
				if( A->U->index[j]>=n ) continue;
				t += A->U->value[j] * x[A->U->index[j]];
			}
			x[i] -= t * A->WD->value[i];
		}
		break;
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_solvet_msr"
/* Transposed triangular solve: X = op(A)^{-T} B, implemented as
 * column-oriented (scatter) updates so L^T uses U's pattern and
 * vice versa. */
LIS_INT lis_matrix_solvet_msr(LIS_MATRIX A, LIS_VECTOR B, LIS_VECTOR X, LIS_INT flag)
{
	LIS_INT i,j,n;
	LIS_SCALAR t;
	LIS_SCALAR *b,*x;

	LIS_DEBUG_FUNC_IN;

	n  = A->n;
	b  = B->value;
	x  = X->value;

	lis_vector_copy(B,X);
	switch(flag)
	{
	case LIS_MATRIX_LOWER:
		for(i=n-1;i>=0;i--)
		{
			x[i] = x[i] * A->WD->value[i];
			for(j=A->U->index[i];j<A->U->index[i+1];j++)
			{
				x[A->U->index[j]] -= A->U->value[j] * x[i];
			}
		}
		break;
	case LIS_MATRIX_UPPER:
		for(i=0;i<n;i++)
		{
			x[i] = x[i] * A->WD->value[i];
			for(j=A->L->index[i];j<A->L->index[i+1];j++)
			{
				x[A->L->index[j]] -= A->L->value[j] * x[i];
			}
		}
		break;
	case LIS_MATRIX_SSOR:
		for(i=0;i<n;i++)
		{
			t = x[i] * A->WD->value[i];
			for(j=A->U->index[i];j<A->U->index[i+1];j++)
			{
				x[A->U->index[j]] -= A->U->value[j] * t;
			}
		}
		for(i=n-1;i>=0;i--)
		{
			t = x[i] * A->WD->value[i];
			x[i] = t;
			for(j=A->L->index[i];j<A->L->index[i+1];j++)
			{
				x[A->L->index[j]] -= A->L->value[j] * t;
			}
		}
		break;
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_convert_crs2msr"
/* Convert a CRS matrix Ain into MSR form in Aout.
 * First pass counts rows with an explicit diagonal (ndz = rows without
 * one), second pass scatters diagonals into value[0..n-1] and
 * off-diagonals after the n+1 pointer slots. */
LIS_INT lis_matrix_convert_crs2msr(LIS_MATRIX Ain, LIS_MATRIX Aout)
{
	LIS_INT i,j,k,jj;
	LIS_INT err;
	LIS_INT n,nnz,ndz;
	LIS_INT count;
	LIS_INT *iw;
	LIS_INT *index;
	LIS_SCALAR *value;
	LIS_DEBUG_FUNC_IN;

	n       = Ain->n;
	nnz     = Ain->nnz;
	iw      = NULL;
	index   = NULL;
	value   = NULL;

	iw = (LIS_INT *)lis_malloc( (n+1)*sizeof(LIS_INT),"lis_matrix_convert_crs2msr::iw" );
	if( iw==NULL )
	{
		LIS_SETERR_MEM((n+1)*sizeof(LIS_INT));
		/* NOTE(review): other routines in this file return LIS_OUT_OF_MEMORY
		 * here — confirm whether LIS_ERR_OUT_OF_MEMORY is equivalent */
		return LIS_ERR_OUT_OF_MEMORY;
	}

	/* check ndz: iw[i+1] = 1 iff row i has an explicit diagonal entry */
	for(i=0;i<n+1;i++) iw[i] = 0;
	count = 0;
#ifdef _OPENMP
#pragma omp parallel private(i,j)
#endif
	{
#ifdef _OPENMP
#pragma omp for
#endif
		for(i=0;i<n;i++)
		{
			iw[i+1] = 0;
			for(j=Ain->ptr[i];j<Ain->ptr[i+1];j++)
			{
				if( i==Ain->index[j] )
				{
					iw[i+1] = 1;
				}
			}
		}
#ifdef _OPENMP
#pragma omp for reduction(+:count)
#endif
		for(i=0;i<n;i++)
		{
			count += iw[i+1];
		}
#ifdef _OPENMP
#pragma omp for
#endif
		/* per-row off-diagonal count = row length minus the diagonal flag */
		for(i=0;i<n;i++)
		{
			iw[i+1] = Ain->ptr[i+1]-Ain->ptr[i]-iw[i+1];
		}
	}
	ndz = n - count;

	err = lis_matrix_malloc_msr(n,nnz,ndz,&index,&value);
	if( err )
	{
		lis_free2(3,index,value,iw);
		return err;
	}

	/* convert msr: prefix-sum the counts into row pointers (offset n+1) */
	iw[0] = n+1;
	for(i=0;i<n;i++)
	{
		iw[i+1] = iw[i+1] + iw[i];
	}
#ifdef _OPENMP
#pragma omp parallel private(i,j,k)
#endif
	{
#ifdef _OPENMP
#pragma omp for
#endif
		for(i=0;i<n+1;i++)
		{
			index[i] = iw[i];
		}
#ifdef _OPENMP
#pragma omp for
#endif
		for(i=0;i<n;i++)
		{
			k = index[i];
			for(j=Ain->ptr[i];j<Ain->ptr[i+1];j++)
			{
				jj = Ain->index[j];
				if( jj==i )
				{
					/* diagonal goes into the dedicated slot value[i] */
					value[i] = Ain->value[j];
				}
				else
				{
					value[k] = Ain->value[j];
					index[k] = Ain->index[j];
					k++;
				}
			}
		}
	}
	err = lis_matrix_set_msr(nnz,ndz,index,value,Aout);
	if( err )
	{
		lis_free2(3,index,value,iw);
		return err;
	}
	err = lis_matrix_assemble(Aout);
	if( err )
	{
		lis_free(iw);
		lis_matrix_storage_destroy(Aout);
		return err;
	}
	lis_free(iw);
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_convert_msr2crs"
/* Convert an MSR matrix Ain into CRS form in Aout.
 * Zero diagonal entries are dropped, so the true number of stored
 * entries may be less than the nnz passed to lis_matrix_set_crs —
 * NOTE(review): verify downstream code tolerates this slack. */
LIS_INT lis_matrix_convert_msr2crs(LIS_MATRIX Ain, LIS_MATRIX Aout)
{
	LIS_INT i,j,k;
	LIS_INT err;
	LIS_INT n,nnz,is;
	LIS_INT *ptr,*index;
	LIS_SCALAR *value;

	LIS_DEBUG_FUNC_IN;

	n       = Ain->n;
	nnz     = Ain->nnz;
	is      = Ain->is;
	ptr     = NULL;
	index   = NULL;
	value   = NULL;

	err = lis_matrix_malloc_crs(n,nnz,&ptr,&index,&value);
	if( err )
	{
		return err;
	}

	/* convert crs: row length = off-diagonals, +1 if diagonal nonzero */
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
	for(i=0;i<n;i++)
	{
		ptr[i+1] = Ain->index[i+1] - Ain->index[i];
		if( Ain->value[i]!=0.0 )
		{
			ptr[i+1]++;
		}
	}
	ptr[0] = 0;
	for(i=0;i<n;i++)
	{
		ptr[i+1] += ptr[i];
	}
#ifdef _OPENMP
#pragma omp parallel for private(i,j,k)
#endif
	for(i=0;i<n;i++)
	{
		k = ptr[i];
		if( Ain->value[i]!=(LIS_SCALAR)0.0 )
		{
			/* diagonal is emitted first in each CRS row */
			value[k] = Ain->value[i];
			index[k] = i;
			k++;
		}
		for(j=Ain->index[i];j<Ain->index[i+1];j++)
		{
			value[k] = Ain->value[j];
			index[k] = Ain->index[j];
			k++;
		}
	}
	err = lis_matrix_set_crs(nnz,ptr,index,value,Aout);
	if( err )
	{
		lis_free2(3,ptr,index,value);
		return err;
	}
	err = lis_matrix_assemble(Aout);
	if( err )
	{
		lis_matrix_storage_destroy(Aout);
		return err;
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
/* ==== transform.c ==== */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M % % T R R A A NN N SS F O O R R MM MM % % T RRRR AAAAA N N N SSS FFF O O RRRR M M M % % T R R A A N NN SS F O O R R M M % % T R R A A N N SSSSS F OOO R R M M % % % % % % MagickCore Image Transform Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/memory_.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile-private.h" #include "MagickCore/property.h" #include "MagickCore/resource_.h" #include "MagickCore/resize.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/transform-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o O r i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoOrientImage() adjusts an image so that its orientation is suitable for % viewing (i.e. top-left orientation). % % The format of the AutoOrientImage method is: % % Image *AutoOrientImage(const Image *image, % const OrientationType orientation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. % % o orientation: Current image orientation. % % o exception: Return any errors or warnings in this structure. 
% */ MagickExport Image *AutoOrientImage(const Image *image, const OrientationType orientation,ExceptionInfo *exception) { Image *orient_image; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); orient_image=(Image *) NULL; switch(orientation) { case UndefinedOrientation: case TopLeftOrientation: default: { orient_image=CloneImage(image,0,0,MagickTrue,exception); break; } case TopRightOrientation: { orient_image=FlopImage(image,exception); break; } case BottomRightOrientation: { orient_image=RotateImage(image,180.0,exception); break; } case BottomLeftOrientation: { orient_image=FlipImage(image,exception); break; } case LeftTopOrientation: { orient_image=TransposeImage(image,exception); break; } case RightTopOrientation: { orient_image=RotateImage(image,90.0,exception); break; } case RightBottomOrientation: { orient_image=TransverseImage(image,exception); break; } case LeftBottomOrientation: { orient_image=RotateImage(image,270.0,exception); break; } } if (orient_image != (Image *) NULL) orient_image->orientation=TopLeftOrientation; return(orient_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C h o p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ChopImage() removes a region of an image and collapses the image to occupy % the removed portion. % % The format of the ChopImage method is: % % Image *ChopImage(const Image *image,const RectangleInfo *chop_info) % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o chop_info: Define the region of the image to chop. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
  ExceptionInfo *exception)
{
#define ChopImageTag  "Chop/Image"

  CacheView
    *chop_view,
    *image_view;

  Image
    *chop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    extent;

  ssize_t
    y;

  /*
    Check chop geometry: clamp the requested region to the image bounds.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(chop_info != (RectangleInfo *) NULL);
  if (((chop_info->x+(ssize_t) chop_info->width) < 0) ||
      ((chop_info->y+(ssize_t) chop_info->height) < 0) ||
      (chop_info->x > (ssize_t) image->columns) ||
      (chop_info->y > (ssize_t) image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  extent=(*chop_info);
  if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns)
    extent.width=(size_t) ((ssize_t) image->columns-extent.x);
  if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows)
    extent.height=(size_t) ((ssize_t) image->rows-extent.y);
  if (extent.x < 0)
    {
      extent.width-=(size_t) (-extent.x);
      extent.x=0;
    }
  if (extent.y < 0)
    {
      extent.height-=(size_t) (-extent.y);
      extent.y=0;
    }
  /* the result shrinks by the chopped width/height */
  chop_image=CloneImage(image,image->columns-extent.width,image->rows-
    extent.height,MagickTrue,exception);
  if (chop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Copy the rows above the chop region; columns inside the chop
    region are skipped (q is only advanced for kept columns).
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  chop_view=AcquireAuthenticCacheView(chop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,chop_image,extent.y,1)
#endif
  for (y=0; y < (ssize_t) extent.y; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (chop_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(chop_image,channel,p[i],q);
          }
          q+=GetPixelChannels(chop_image);
        }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Copy the rows below the chop region, shifted up by the chopped height.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,chop_image,image->rows-(extent.y+extent.height),1)
#endif
  for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (chop_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(chop_image,channel,p[i],q);
          }
          q+=GetPixelChannels(chop_image);
        }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  chop_view=DestroyCacheView(chop_view);
  image_view=DestroyCacheView(image_view);
  chop_image->type=image->type;
  if (status == MagickFalse)
    chop_image=DestroyImage(chop_image);
  return(chop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   C o n s o l i d a t e C M Y K I m a g e                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConsolidateCMYKImages() consolidates separate C, M, Y, and K planes into a
%  single image.
%
%  The format of the ConsolidateCMYKImages method is:
%
%      Image *ConsolidateCMYKImages(const Image *images,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConsolidateCMYKImages(const Image *images,
  ExceptionInfo *exception)
{
  CacheView
    *cmyk_view,
    *image_view;

  Image
    *cmyk_image,
    *cmyk_images;

  ssize_t
    j;

  ssize_t
    y;

  /*
    Consolidate separate C, M, Y, and K planes into a single image:
    every group of four consecutive images in the list becomes one
    CMYK image, taking each plane's intensity as one channel.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cmyk_images=NewImageList();
  for (j=0; j < (ssize_t) GetImageListLength(images); j+=4)
  {
    ssize_t
      i;

    assert(images != (Image *) NULL);
    cmyk_image=CloneImage(images,0,0,MagickTrue,
      exception);
    if (cmyk_image == (Image *) NULL)
      break;
    if (SetImageStorageClass(cmyk_image,DirectClass,exception) == MagickFalse)
      break;
    (void) SetImageColorspace(cmyk_image,CMYKColorspace,exception);
    for (i=0; i < 4; i++)
    {
      image_view=AcquireVirtualCacheView(images,exception);
      cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
      for (y=0; y < (ssize_t) images->rows; y++)
      {
        const Quantum
          *magick_restrict p;

        ssize_t
          x;

        Quantum
          *magick_restrict q;

        p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
        q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
          exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          break;
        for (x=0; x < (ssize_t) images->columns; x++)
        {
          Quantum
            pixel;

          /* plane intensity is inverted: ink coverage = max - luminance */
          pixel=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
          switch (i)
          {
            case 0:
            {
              SetPixelCyan(cmyk_image,pixel,q);
              break;
            }
            case 1:
            {
              SetPixelMagenta(cmyk_image,pixel,q);
              break;
            }
            case 2:
            {
              SetPixelYellow(cmyk_image,pixel,q);
              break;
            }
            case 3:
            {
              SetPixelBlack(cmyk_image,pixel,q);
              break;
            }
            default:
              break;
          }
          p+=GetPixelChannels(images);
          q+=GetPixelChannels(cmyk_image);
        }
        if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
          break;
      }
      cmyk_view=DestroyCacheView(cmyk_view);
      image_view=DestroyCacheView(image_view);
      images=GetNextImageInList(images);
      if (images == (Image *) NULL)
        break;
    }
    AppendImageToList(&cmyk_images,cmyk_image);
  }
  return(cmyk_images);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   C r o p I m a g e                                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CropImage() extracts a region of the image starting at the offset defined
%  by geometry.  Region must be fully defined, and no special handling of
%  geometry flags is performed.
%
%  The format of the CropImage method is:
%
%      Image *CropImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to crop with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry,
  ExceptionInfo *exception)
{
#define CropImageTag  "Crop/Image"

  CacheView
    *crop_view,
    *image_view;

  Image
    *crop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  OffsetInfo
    offset;

  RectangleInfo
    bounding_box,
    page;

  ssize_t
    y;

  /*
    Check crop geometry against the virtual canvas (page).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  bounding_box=image->page;
  if ((bounding_box.width == 0) || (bounding_box.height == 0))
    {
      bounding_box.width=image->columns;
      bounding_box.height=image->rows;
    }
  page=(*geometry);
  if (page.width == 0)
    page.width=bounding_box.width;
  if (page.height == 0)
    page.height=bounding_box.height;
  if (((bounding_box.x-page.x) >= (ssize_t) page.width) ||
      ((bounding_box.y-page.y) >= (ssize_t) page.height) ||
      ((page.x-bounding_box.x) > (ssize_t) image->columns) ||
      ((page.y-bounding_box.y) > (ssize_t) image->rows))
    {
      /*
        Crop is not within virtual canvas, return 1 pixel transparent image.
      */
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      crop_image=CloneImage(image,1,1,MagickTrue,exception);
      if (crop_image == (Image *) NULL)
        return((Image *) NULL);
      crop_image->background_color.alpha_trait=BlendPixelTrait;
      crop_image->background_color.alpha=(MagickRealType) TransparentAlpha;
      (void) SetImageBackgroundColor(crop_image,exception);
      crop_image->page=bounding_box;
      crop_image->page.x=(-1);
      crop_image->page.y=(-1);
      if (crop_image->dispose == BackgroundDispose)
        crop_image->dispose=NoneDispose;
      return(crop_image);
    }
  /* translate the crop rectangle into the image's own coordinates */
  if ((page.x < 0) && (bounding_box.x >= 0))
    {
      page.width+=page.x-bounding_box.x;
      page.x=0;
    }
  else
    {
      page.width-=bounding_box.x-page.x;
      page.x-=bounding_box.x;
      if (page.x < 0)
        page.x=0;
    }
  if ((page.y < 0) && (bounding_box.y >= 0))
    {
      page.height+=page.y-bounding_box.y;
      page.y=0;
    }
  else
    {
      page.height-=bounding_box.y-page.y;
      page.y-=bounding_box.y;
      if (page.y < 0)
        page.y=0;
    }
  if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns)
    page.width=image->columns-page.x;
  if ((geometry->width != 0) && (page.width > geometry->width))
    page.width=geometry->width;
  if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows)
    page.height=image->rows-page.y;
  if ((geometry->height != 0) && (page.height > geometry->height))
    page.height=geometry->height;
  bounding_box.x+=page.x;
  bounding_box.y+=page.y;
  if ((page.width == 0) || (page.height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      return((Image *) NULL);
    }
  /*
    Initialize crop image attributes.
  */
  crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception);
  if (crop_image == (Image *) NULL)
    return((Image *) NULL);
  crop_image->page.width=image->page.width;
  crop_image->page.height=image->page.height;
  offset.x=(ssize_t) (bounding_box.x+bounding_box.width);
  offset.y=(ssize_t) (bounding_box.y+bounding_box.height);
  if ((offset.x > (ssize_t) image->page.width) ||
      (offset.y > (ssize_t) image->page.height))
    {
      crop_image->page.width=bounding_box.width;
      crop_image->page.height=bounding_box.height;
    }
  crop_image->page.x=bounding_box.x;
  crop_image->page.y=bounding_box.y;
  /*
    Crop image: copy page.width x page.height pixels starting at
    (page.x, page.y) row by row through the cache views.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  crop_view=AcquireAuthenticCacheView(crop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,crop_image,crop_image->rows,1)
#endif
  for (y=0; y < (ssize_t) crop_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns,
      1,exception);
    q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) crop_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait crop_traits=GetPixelChannelTraits(crop_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (crop_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(crop_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(crop_image);
    }
    if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CropImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  crop_view=DestroyCacheView(crop_view);
  image_view=DestroyCacheView(image_view);
  crop_image->type=image->type;
  if (status == MagickFalse)
    crop_image=DestroyImage(crop_image);
  return(crop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   C r o p I m a g e T o T i l e s                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CropImageToTiles() crops a single image, into a possible list of tiles.
%  This may include a single sub-region of the image.  This basically applies
%  all the normal geometry flags for Crop.
%
%      Image *CropImageToTiles(const Image *image,
%         const RectangleInfo *crop_geometry, ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image The transformed image is returned as this parameter.
%
%    o crop_geometry: A crop geometry string.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline ssize_t PixelRoundOffset(double x)
{
  /*
    Round the fraction to nearest integer.
  */
  if ((x-floor(x)) < (ceil(x)-x))
    return(CastDoubleToLong(floor(x)));
  return(CastDoubleToLong(ceil(x)));
}

MagickExport Image *CropImageToTiles(const Image *image,
  const char *crop_geometry,ExceptionInfo *exception)
{
  Image
    *next,
    *crop_image;

  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
  if ((flags & AreaValue) != 0)
    {
      PointInfo
        delta,
        offset;

      RectangleInfo
        crop;

      size_t
        height,
        width;

      /*
        Crop into NxM tiles (@ flag).
      */
      crop_image=NewImageList();
      width=image->columns;
      height=image->rows;
      if (geometry.width == 0)
        geometry.width=1;
      if (geometry.height == 0)
        geometry.height=1;
      if ((flags & AspectValue) == 0)
        {
          width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      else
        {
          width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      /* size of one tile; never smaller than a single pixel */
      delta.x=(double) width/geometry.width;
      delta.y=(double) height/geometry.height;
      if (delta.x < 1.0)
        delta.x=1.0;
      if (delta.y < 1.0)
        delta.y=1.0;
      for (offset.y=0; offset.y < (double) height; )
      {
        if ((flags & AspectValue) == 0)
          {
            crop.y=PixelRoundOffset((double) (offset.y-
              (geometry.y > 0 ? 0 : geometry.y)));
            offset.y+=delta.y;   /* increment now to find width */
            crop.height=(size_t) PixelRoundOffset((double) (offset.y+
              (geometry.y < 0 ? 0 : geometry.y)));
          }
        else
          {
            crop.y=PixelRoundOffset((double) (offset.y-
              (geometry.y > 0 ? geometry.y : 0)));
            offset.y+=delta.y;   /* increment now to find width */
            /*
              Fix: test was "geometry.y < -1", which silently dropped a
              -1 offset; mirror the x-axis handling below ("< 0").
            */
            crop.height=(size_t) PixelRoundOffset((double)
              (offset.y+(geometry.y < 0 ? geometry.y : 0)));
          }
        crop.height-=crop.y;
        crop.y+=image->page.y;
        for (offset.x=0; offset.x < (double) width; )
        {
          if ((flags & AspectValue) == 0)
            {
              crop.x=PixelRoundOffset((double) (offset.x-
                (geometry.x > 0 ? 0 : geometry.x)));
              offset.x+=delta.x;  /* increment now to find height */
              crop.width=(size_t) PixelRoundOffset((double) (offset.x+
                (geometry.x < 0 ? 0 : geometry.x)));
            }
          else
            {
              crop.x=PixelRoundOffset((double) (offset.x-
                (geometry.x > 0 ? geometry.x : 0)));
              offset.x+=delta.x;  /* increment now to find height */
              crop.width=(size_t) PixelRoundOffset((double) (offset.x+
                (geometry.x < 0 ? geometry.x : 0)));
            }
          crop.width-=crop.x;
          crop.x+=image->page.x;
          next=CropImage(image,&crop,exception);
          if (next != (Image *) NULL)
            AppendImageToList(&crop_image,next);
        }
      }
      /* partial edge tiles may have raised warnings; the tile list stands */
      ClearMagickException(exception);
      return(crop_image);
    }
  if (((geometry.width == 0) && (geometry.height == 0)) ||
      ((flags & XValue) != 0) || ((flags & YValue) != 0))
    {
      /*
        Crop a single region at +X+Y.
      */
      crop_image=CropImage(image,&geometry,exception);
      if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
        {
          crop_image->page.width=geometry.width;
          crop_image->page.height=geometry.height;
          crop_image->page.x-=geometry.x;
          crop_image->page.y-=geometry.y;
        }
      return(crop_image);
    }
  if ((image->columns > geometry.width) || (image->rows > geometry.height))
    {
      RectangleInfo
        page;

      size_t
        height,
        width;

      ssize_t
        x,
        y;

      /*
        Crop into tiles of fixed size WxH.
      */
      page=image->page;
      if (page.width == 0)
        page.width=image->columns;
      if (page.height == 0)
        page.height=image->rows;
      width=geometry.width;
      if (width == 0)
        width=page.width;
      height=geometry.height;
      if (height == 0)
        height=page.height;
      next=(Image *) NULL;
      crop_image=NewImageList();
      for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
      {
        for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
        {
          geometry.width=width;
          geometry.height=height;
          geometry.x=x;
          geometry.y=y;
          next=CropImage(image,&geometry,exception);
          if (next == (Image *) NULL)
            break;
          AppendImageToList(&crop_image,next);
        }
        if (next == (Image *) NULL)
          break;
      }
      return(crop_image);
    }
  /* geometry covers the whole image: return an unmodified copy */
  return(CloneImage(image,0,0,MagickTrue,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   E x c e r p t I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExcerptImage() returns a excerpt of the image as defined by the geometry.
%
%  The format of the ExcerptImage method is:
%
%      Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to extend with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExcerptImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag  "Excerpt/Image"

  CacheView
    *excerpt_view,
    *image_view;

  Image
    *excerpt_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate excerpt image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (excerpt_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Excerpt each row.  Unlike CropImage() the geometry is used as-is: no
    virtual-canvas (page) adjustment is performed here.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,excerpt_image,excerpt_image->rows,1)
#endif
  for (y=0; y < (ssize_t) excerpt_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y,
      geometry->width,1,exception);
    q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) excerpt_image->columns; x++)
    {
      ssize_t
        i;

      /* copy only channels defined in both source and destination */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait excerpt_traits=GetPixelChannelTraits(excerpt_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (excerpt_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(excerpt_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(excerpt_image);
    }
    if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ExcerptImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  excerpt_view=DestroyCacheView(excerpt_view);
  image_view=DestroyCacheView(image_view);
  excerpt_image->type=image->type;
  if (status == MagickFalse)
    excerpt_image=DestroyImage(excerpt_image);
  return(excerpt_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   E x t e n t I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExtentImage() extends the image as defined by the geometry, gravity, and
%  image background color.  Set the (x,y) offset of the geometry to move the
%  original image relative to the extended image.
%
%  The format of the ExtentImage method is:
%
%      Image *ExtentImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to extend with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExtentImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
  Image
    *extent_image;

  MagickBooleanType
    status;

  /*
    Allocate extent image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (extent_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageBackgroundColor(extent_image,exception);
  if (status == MagickFalse)
    {
      extent_image=DestroyImage(extent_image);
      return((Image *) NULL);
    }
  /* negative of the geometry offset positions the source on the canvas */
  status=CompositeImage(extent_image,image,image->compose,MagickTrue,
    -geometry->x,-geometry->y,exception);
  if (status != MagickFalse)
    Update8BIMClipPath(extent_image,image->columns,image->rows,geometry);
  return(extent_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   F l i p I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FlipImage() creates a vertical mirror image by reflecting the pixels
%  around the central x-axis.
%
%  The format of the FlipImage method is:
%
%      Image *FlipImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag  "Flip/Image"

  CacheView
    *flip_view,
    *image_view;

  Image
    *flip_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  flip_image=CloneImage(image,0,0,MagickTrue,exception);
  if (flip_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flip image: each source row y is written to destination row rows-y-1.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flip_view=AcquireAuthenticCacheView(flip_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,flip_image,flip_image->rows,1)
#endif
  for (y=0; y < (ssize_t) flip_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
      1),flip_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) flip_image->columns; x++)
    {
      ssize_t
        i;

      /* copy only channels defined in both source and destination */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait flip_traits=GetPixelChannelTraits(flip_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (flip_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(flip_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(flip_image);
    }
    if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FlipImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flip_view=DestroyCacheView(flip_view);
  image_view=DestroyCacheView(image_view);
  flip_image->type=image->type;
  /* mirror the virtual-canvas offset too so pages stay consistent */
  if (page.height != 0)
    page.y=(ssize_t) (page.height-flip_image->rows-page.y);
  flip_image->page=page;
  if (status == MagickFalse)
    flip_image=DestroyImage(flip_image);
  return(flip_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   F l o p I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FlopImage() creates a horizontal mirror image by reflecting the pixels
%  around the central y-axis.
%
%  The format of the FlopImage method is:
%
%      Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag  "Flop/Image"

  CacheView
    *flop_view,
    *image_view;

  Image
    *flop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  flop_image=CloneImage(image,0,0,MagickTrue,exception);
  if (flop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flop each row: pixels are written right-to-left into the destination.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flop_view=AcquireAuthenticCacheView(flop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,flop_image,flop_image->rows,1)
#endif
  for (y=0; y < (ssize_t) flop_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* start q one past the last pixel; it is decremented before each write */
    q+=GetPixelChannels(flop_image)*flop_image->columns;
    for (x=0; x < (ssize_t) flop_image->columns; x++)
    {
      ssize_t
        i;

      q-=GetPixelChannels(flop_image);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait flop_traits=GetPixelChannelTraits(flop_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (flop_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(flop_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FlopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flop_view=DestroyCacheView(flop_view);
  image_view=DestroyCacheView(image_view);
  flop_image->type=image->type;
  /* mirror the virtual-canvas offset too so pages stay consistent */
  if (page.width != 0)
    page.x=(ssize_t) (page.width-flop_image->columns-page.x);
  flop_image->page=page;
  if (status == MagickFalse)
    flop_image=DestroyImage(flop_image);
  return(flop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R o l l I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RollImage() offsets an image as defined by x_offset and y_offset.
%
%  The format of the RollImage method is:
%
%      Image *RollImage(const Image *image,const ssize_t x_offset,
%        const ssize_t y_offset,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x_offset: the number of columns to roll in the horizontal direction.
%
%    o y_offset: the number of rows to roll in the vertical direction.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static MagickBooleanType CopyImageRegion(Image *destination,const Image *source,
  const size_t columns,const size_t rows,const ssize_t sx,const ssize_t sy,
  const ssize_t dx,const ssize_t dy,ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *destination_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /* nothing to copy; avoid zero-width cache requests */
  if (columns == 0)
    return(MagickTrue);
  status=MagickTrue;
  source_view=AcquireVirtualCacheView(source,exception);
  destination_view=AcquireAuthenticCacheView(destination,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,destination,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    /*
      Transfer scanline.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
    q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      ssize_t
        i;

      /* copy only channels defined in both source and destination */
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait source_traits=GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((source_traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(destination,channel,p[i],q);
      }
      p+=GetPixelChannels(source);
      q+=GetPixelChannels(destination);
    }
    sync=SyncCacheViewAuthenticPixels(destination_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}

MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag  "Roll/Image"

  Image
    *roll_image;

  MagickStatusType
    status;

  RectangleInfo
    offset;

  /*
    Initialize roll image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  roll_image=CloneImage(image,0,0,MagickTrue,exception);
  if (roll_image == (Image *) NULL)
    return((Image *) NULL);
  /* normalize offsets into [0, columns) x [0, rows) */
  offset.x=x_offset;
  offset.y=y_offset;
  while (offset.x < 0)
    offset.x+=(ssize_t) image->columns;
  while (offset.x >= (ssize_t) image->columns)
    offset.x-=(ssize_t) image->columns;
  while (offset.y < 0)
    offset.y+=(ssize_t) image->rows;
  while (offset.y >= (ssize_t) image->rows)
    offset.y-=(ssize_t) image->rows;
  /*
    Roll image: the image wraps around, so it is assembled from the four
    rectangular quadrants split at (offset.x, offset.y).
  */
  status=CopyImageRegion(roll_image,image,(size_t) offset.x,
    (size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows-
    offset.y,0,0,exception);
  (void) SetImageProgress(image,RollImageTag,0,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,
    (size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0,
    exception);
  (void) SetImageProgress(image,RollImageTag,1,3);
  status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows-
    offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,2,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows-
    offset.y,0,0,offset.x,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,3,3);
  roll_image->type=image->type;
  if (status == MagickFalse)
    roll_image=DestroyImage(roll_image);
  return(roll_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S h a v e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShaveImage() shaves pixels from the image edges.  It allocates the memory
%  necessary for the new Image structure and returns a pointer to the new
%  image.
%
%  The format of the ShaveImage method is:
%
%      Image *ShaveImage(const Image *image,const RectangleInfo *shave_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o shave_image: Method ShaveImage returns a pointer to the shaved
%      image.  A null image is returned if there is a memory shortage or
%      if the image width or height is zero.
%
%    o image: the image.
%
%    o shave_info: Specifies a pointer to a RectangleInfo which defines the
%      region of the image to crop.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShaveImage(const Image *image,
  const RectangleInfo *shave_info,ExceptionInfo *exception)
{
  Image
    *shave_image;

  RectangleInfo
    geometry;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* shaving both edges must leave at least one pixel in each dimension */
  if (((2*shave_info->width) >= image->columns) ||
      ((2*shave_info->height) >= image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  /* express the shave as a centered crop region */
  SetGeometry(image,&geometry);
  geometry.width-=2*shave_info->width;
  geometry.height-=2*shave_info->height;
  geometry.x=(ssize_t) shave_info->width+image->page.x;
  geometry.y=(ssize_t) shave_info->height+image->page.y;
  shave_image=CropImage(image,&geometry,exception);
  if (shave_image == (Image *) NULL)
    return((Image *) NULL);
  /* shrink the virtual canvas by the shaved margins as well */
  shave_image->page.width-=2*shave_info->width;
  shave_image->page.height-=2*shave_info->height;
  shave_image->page.x-=(ssize_t) shave_info->width;
  shave_image->page.y-=(ssize_t) shave_info->height;
  return(shave_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S p l i c e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SpliceImage() splices a solid color into the image as defined by the
%  geometry.
% % The format of the SpliceImage method is: % % Image *SpliceImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to splice with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SpliceImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { #define SpliceImageTag "Splice/Image" CacheView *image_view, *splice_view; Image *splice_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo splice_geometry; ssize_t columns, y; /* Allocate splice image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); splice_geometry=(*geometry); splice_image=CloneImage(image,image->columns+splice_geometry.width, image->rows+splice_geometry.height,MagickTrue,exception); if (splice_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(splice_image,DirectClass,exception) == MagickFalse) { splice_image=DestroyImage(splice_image); return((Image *) NULL); } if ((IsPixelInfoGray(&splice_image->background_color) == MagickFalse) && (IsGrayColorspace(splice_image->colorspace) != MagickFalse)) (void) SetImageColorspace(splice_image,sRGBColorspace,exception); if ((splice_image->background_color.alpha_trait != UndefinedPixelTrait) && (splice_image->alpha_trait == UndefinedPixelTrait)) (void) SetImageAlpha(splice_image,OpaqueAlpha,exception); (void) SetImageBackgroundColor(splice_image,exception); /* Respect image geometry. 
*/ switch (image->gravity) { default: case UndefinedGravity: case NorthWestGravity: break; case NorthGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; break; } case NorthEastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; break; } case WestGravity: { splice_geometry.y+=(ssize_t) splice_geometry.width/2; break; } case CenterGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; splice_geometry.y+=(ssize_t) splice_geometry.height/2; break; } case EastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; splice_geometry.y+=(ssize_t) splice_geometry.height/2; break; } case SouthWestGravity: { splice_geometry.y+=(ssize_t) splice_geometry.height; break; } case SouthGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; splice_geometry.y+=(ssize_t) splice_geometry.height; break; } case SouthEastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; splice_geometry.y+=(ssize_t) splice_geometry.height; break; } } /* Splice image. 
*/ status=MagickTrue; progress=0; columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns); image_view=AcquireVirtualCacheView(image,exception); splice_view=AcquireAuthenticCacheView(splice_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,splice_image,splice_geometry.y,1) #endif for (y=0; y < (ssize_t) splice_geometry.y; y++) { const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q+=GetPixelChannels(splice_image); for ( ; x < (ssize_t) splice_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) 
continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SpliceImageTag,progress, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,splice_image,splice_image->rows,2) #endif for (y=(ssize_t) (splice_geometry.y+splice_geometry.height); y < (ssize_t) splice_image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; if ((y < 0) || (y >= (ssize_t)splice_image->rows)) continue; p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height, splice_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); 
SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q+=GetPixelChannels(splice_image); for ( ; x < (ssize_t) splice_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SpliceImageTag,progress, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } splice_view=DestroyCacheView(splice_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) splice_image=DestroyImage(splice_image); return(splice_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformImage() is a convenience method that behaves like ResizeImage() or % CropImage() but accepts scaling and/or cropping information as a region % geometry specification. 
If the operation fails, the original image handle
% is left as is.
%
% This should only be used for single images.
%
% This function destroys what it assumes to be a single image list.
% If the input image is part of a larger list, all other images in that list
% will be simply 'lost', not destroyed.
%
% Also if the crop generates a list of images only the first image is resized.
% And finally if the crop succeeds and the resize failed, you will get a
% cropped image, as well as a 'false' or 'failed' report.
%
% This function should probably be deprecated in favor of direct calls
% to CropImageToTiles() or ResizeImage(), as appropriate.
%
% The format of the TransformImage method is:
%
%     MagickBooleanType TransformImage(Image **image,const char *crop_geometry,
%       const char *image_geometry,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image The transformed image is returned as this parameter.
%
%   o crop_geometry: A crop geometry string.  This geometry defines a
%     subregion of the image to crop.
%
%   o image_geometry: An image geometry string.  This geometry defines the
%     final size of the image.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType TransformImage(Image **image,
  const char *crop_geometry,const char *image_geometry,ExceptionInfo *exception)
{
  Image
    *resize_image,
    *transform_image;

  RectangleInfo
    geometry;

  assert(image != (Image **) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  transform_image=(*image);
  if (crop_geometry != (const char *) NULL)
    {
      Image
        *crop_image;

      /*
        Crop image to a user specified size.
      */
      crop_image=CropImageToTiles(*image,crop_geometry,exception);
      if (crop_image == (Image *) NULL)
        transform_image=CloneImage(*image,0,0,MagickTrue,exception);
      else
        {
          /*
            CropImageToTiles() may return a list; the original handle is
            destroyed and replaced by the first image of that list (the rest
            of the list is reachable via the list links, not via *image).
          */
          transform_image=DestroyImage(transform_image);
          transform_image=GetFirstImageInList(crop_image);
        }
      /* caller's handle now tracks the (possibly new) image */
      *image=transform_image;
    }
  if (image_geometry == (const char *) NULL)
    return(MagickTrue);
  /*
    Scale image to a user specified size.
  */
  (void) ParseRegionGeometry(transform_image,image_geometry,&geometry,
    exception);
  /* already at the requested size: nothing to do */
  if ((transform_image->columns == geometry.width) &&
      (transform_image->rows == geometry.height))
    return(MagickTrue);
  resize_image=ResizeImage(transform_image,geometry.width,geometry.height,
    transform_image->filter,exception);
  if (resize_image == (Image *) NULL)
    return(MagickFalse);
  /* resize succeeded: swap the caller's handle to the resized image */
  transform_image=DestroyImage(transform_image);
  transform_image=resize_image;
  *image=transform_image;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s p o s e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransposeImage() creates a horizontal mirror image by reflecting the pixels
%  around the central y-axis while rotating them by 90 degrees.
%
%  The format of the TransposeImage method is:
%
%      Image *TransposeImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception)
{
#define TransposeImageTag  "Transpose/Image"

  CacheView
    *image_view,
    *transpose_view;

  Image
    *transpose_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* note swapped dimensions: the result is rows x columns */
  transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transpose_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transpose image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transpose_view=AcquireAuthenticCacheView(transpose_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,transpose_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* read source row (rows-y-1); write it as destination column (rows-y-1) */
    p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1),
      0,1,transpose_image->rows,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait transpose_traits=GetPixelChannelTraits(transpose_image,
          channel);
        /* copy only channels defined in both images */
        if ((traits == UndefinedPixelTrait) ||
            (transpose_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(transpose_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(transpose_image);
    }
    if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TransposeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transpose_view=DestroyCacheView(transpose_view);
  image_view=DestroyCacheView(image_view);
  transpose_image->type=image->type;
  /* the page geometry rotates along with the pixels */
  page=transpose_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  transpose_image->page=page;
  if (status == MagickFalse)
    transpose_image=DestroyImage(transpose_image);
  return(transpose_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s v e r s e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransverseImage() creates a vertical mirror image by reflecting the pixels
%  around the central x-axis while rotating them by 270 degrees.
%
%  The format of the TransverseImage method is:
%
%      Image *TransverseImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag  "Transverse/Image"

  CacheView
    *image_view,
    *transverse_view;

  Image
    *transverse_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* note swapped dimensions: the result is rows x columns */
  transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transverse_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transverse image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transverse_view=AcquireAuthenticCacheView(transverse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,transverse_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* read source row y; write destination column (rows-y-1) bottom-up */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-1),
      0,1,transverse_image->rows,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* start one pixel past the end of the column and walk q backwards */
    q+=GetPixelChannels(transverse_image)*image->columns;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      q-=GetPixelChannels(transverse_image);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait transverse_traits=GetPixelChannelTraits(transverse_image,
          channel);
        /* copy only channels defined in both images */
        if ((traits == UndefinedPixelTrait) ||
            (transverse_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(transverse_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
    }
    sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TransverseImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transverse_view=DestroyCacheView(transverse_view);
  image_view=DestroyCacheView(image_view);
  transverse_image->type=image->type;
  /* rotate the page geometry and mirror the offsets within the page canvas */
  page=transverse_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  if (page.width != 0)
    page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
  if (page.height != 0)
    page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
  transverse_image->page=page;
  if (status == MagickFalse)
    transverse_image=DestroyImage(transverse_image);
  return(transverse_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r i m I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TrimImage() trims pixels from the image edges.  It allocates the memory
%  necessary for the new Image structure and returns a pointer to the new
%  image.
%
%  The format of the TrimImage method is:
%
%      Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception) { Image *trim_image; RectangleInfo geometry; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); geometry=GetImageBoundingBox(image,exception); if ((geometry.width == 0) || (geometry.height == 0)) { Image *crop_image; crop_image=CloneImage(image,1,1,MagickTrue,exception); if (crop_image == (Image *) NULL) return((Image *) NULL); crop_image->background_color.alpha_trait=BlendPixelTrait; crop_image->background_color.alpha=(MagickRealType) TransparentAlpha; (void) SetImageBackgroundColor(crop_image,exception); crop_image->page=image->page; crop_image->page.x=(-1); crop_image->page.y=(-1); return(crop_image); } geometry.x+=image->page.x; geometry.y+=image->page.y; trim_image=CropImage(image,&geometry,exception); if (trim_image != (Image *) NULL) Update8BIMClipPath(trim_image,image->columns,image->rows,&geometry); return(trim_image); }
kernels.h
/*
 * Do most of a dot product computation using two input vectors,
 * output an array that simply needs to be summed at the end.
 *
 * Each team computes one partial sum in result[team] via a shared-cache tree
 * reduction: every thread accumulates a strided slice of v1.v2, then the team
 * reduces its per-thread partials by repeated halving.
 *
 * NOTE(review): the halving reduction is only correct if threadsPerBlock is a
 * power of two, and assumes each team actually runs threadsPerBlock threads
 * (thread_limit is an upper bound, not a guarantee) -- TODO confirm both.
 */
void computeDotProductHelper( int *__restrict result, const int *__restrict v1, const int *__restrict v2, const int teams, const int vLength) {
#pragma omp target teams num_teams(teams) thread_limit(threadsPerBlock)
  {
    /* one partial-sum slot per thread of this team */
    int cache[threadsPerBlock];
#pragma omp parallel
    {
      const int cacheIndex = omp_get_thread_num();
      const int blockIdx_x = omp_get_team_num();
      int tid = cacheIndex + blockIdx_x * threadsPerBlock;
      int temp = 0;
      /* stride over the vectors so consecutive threads touch consecutive elements */
      while (tid < vLength) {
        temp += v1[tid] * v2[tid];
        tid += threadsPerBlock * teams;
      }
      cache[cacheIndex] = temp;
#pragma omp barrier
      /* tree reduction: halve the active range each round */
      int i = threadsPerBlock/2;
      while(i != 0) {
        if (cacheIndex < i)
          cache[cacheIndex] += cache[cacheIndex + i];
#pragma omp barrier
        i /= 2;
      }
      /* thread 0 publishes this team's partial sum */
      if (cacheIndex == 0)
        result[blockIdx_x] = cache[0];
    }
  }
}

/*
 * Count how many elements in a vector exceed some threshold, outputting an array that simply needs to be summed at the end to get the final result.
 *
 * Same team-local tree-reduction structure as computeDotProductHelper, with a
 * per-thread count instead of a per-thread product sum.
 * NOTE(review): same power-of-two / full-team assumptions apply -- TODO confirm.
 */
void countAboveThresholdHelper( const float *__restrict array, const float threshold, int *__restrict counter, const int teams, const int arrayLength) {
#pragma omp target teams num_teams(teams) thread_limit(threadsPerBlock)
  {
    /* one per-thread count slot per team */
    int binomial[threadsPerBlock];
#pragma omp parallel
    {
      const int cacheIndex = omp_get_thread_num();
      const int blockIdx_x = omp_get_team_num();
      int tid = cacheIndex + blockIdx_x * threadsPerBlock;
      int ptemp = 0;
      /* strided scan: count strictly-greater elements in this thread's slice */
      while (tid < arrayLength) {
        if (array[tid] > threshold) {
          ptemp++;
        }
        tid += threadsPerBlock * teams;
      }
      binomial[cacheIndex] = ptemp;
#pragma omp barrier
      /* tree reduction of the per-thread counts */
      int i = threadsPerBlock/2;
      while(i != 0) {
        if (cacheIndex < i)
          binomial[cacheIndex] += binomial[cacheIndex + i];
#pragma omp barrier
        i /= 2;
      }
      if (cacheIndex == 0)
        counter[blockIdx_x] = binomial[0];
    }
  }
}

/*
 * Compute connection scores for randomized signatures, normalized to a given UCmax.
 * random should be a pointer to a (device) array containing uniformly distributed random values between 0-1 (including 0, but not including 1; these can be generated by CURAND).
 * A value > 0.5 indicates the randomly-selected gene that should be up-regulated, otherwise the gene should be down-regulated.
 * The same random number is then also used to select the gene, by rescaling its absolute value after subtracting 0.5 into an array index (effectively).
 *
 * NOTE(review): reffile is presumed to hold 2*U133AArrayLength entries, since
 * indices are drawn from [0, 2*U133AArrayLength) -- verify against the caller.
 */
void computeRandomConnectionScores( const float *__restrict random, const int *__restrict reffile, float *__restrict output, const int M, const float UCmax, const int setSize, const int nRandomGenerations) {
#pragma omp target teams distribute parallel for thread_limit(threadsPerBlock)
  for (int idx = 0; idx < nRandomGenerations; idx++) {
    float temp = 0.0;
    /* each generation idx consumes every nRandomGenerations-th random value */
    for(int col = idx; col < M; col += nRandomGenerations) {
      // For converting to in-range indices, it helps to have the random numbers from 0 (inclusive) to 1 (non-inclusive) - so just flip them
      float n = 1.0f - random[col];
      // We'll ultimately want to normalize our results by the setSize - do it now, when round-off errors will hopefully cost less
      float regulateFactor = 1.0f / setSize;
      // If our random number now is >= 0.5, we'll downregulate - and subtract 0.5 so that 0 <= random < 0.5
      if (n >= 0.5f) {
        regulateFactor = -regulateFactor;
        n -= 0.5f;
      }
      // Scale up random to become an integer index < arraySizeEnd
      // (int) is not equal to __float2int_rd
      int rangeInArray = (int)(n * U133AArrayLength * 2);
      // Add or subtract the randomly selected value from the array to the cumulative total
      temp += reffile[rangeInArray] * regulateFactor;
    }
    // Update the output, further normalizing by UCmax
    output[idx] = temp / UCmax;
  }
}
create_from_mesh.h
// Copyright (c) 2017, The OctNet authors // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of the <organization> nor the // names of its contributors may be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL OCTNET AUTHORS BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#ifndef OCTREE_CREATE_FROM_MESH_CPU_H #define OCTREE_CREATE_FROM_MESH_CPU_H #include "octnet/create/create.h" #include "octnet/cpu/cpu.h" #include "octnet/geometry/geometry.h" #include <iostream> #include <sstream> #include <fstream> #include <vector> #include <cstdlib> #if defined(_OPENMP) #include <omp.h> #endif class OctreeCreateFromMeshHelperCpu : public OctreeCreateHelperCpu { public: OctreeCreateFromMeshHelperCpu(ot_size_t grid_depth_, ot_size_t grid_height_, ot_size_t grid_width_) : OctreeCreateHelperCpu(grid_depth_, grid_height_, grid_width_), tinds(grid_depth_ * grid_height_ * grid_width_) {} virtual ~OctreeCreateFromMeshHelperCpu() {} public: std::vector<std::vector<int> > tinds; }; class OctreeFromMesh : public OctreeCreateCpu { public: OctreeFromMesh(int n_verts_, float* verts_, int n_faces_, int* faces_, bool rescale_verts, ot_size_t depth_, ot_size_t height_, ot_size_t width_, int pad_) : OctreeCreateCpu((depth_ + 7) / 8, (height_ + 7) / 8, (width_ + 7) / 8, 1), depth(depth_), height(height_), width(width_), n_verts(n_verts_), verts(verts_), n_faces(n_faces_), faces(faces_), pad(pad_) { if(rescale_verts) { rescale(); } } virtual ~OctreeFromMesh() { } virtual octree* operator()(bool fit, int fit_multiply, bool pack, int n_threads) { //determine block triangle intersections int n_blocks = grid_depth * grid_height * grid_width; printf(" [OctreeCreateCpu] determine block triangle intersections\n"); OctreeCreateFromMeshHelperCpu helper(grid_depth, grid_height, grid_width); #if defined(_OPENMP) omp_set_num_threads(n_threads); #endif #pragma omp parallel for for(int grid_idx = 0; grid_idx < n_blocks; ++grid_idx) { int gd = grid_idx / (grid_height * grid_width); int gh = (grid_idx / grid_width) % grid_height; int gw = grid_idx % grid_width; float cx = gw * 8 + 4; float cy = gh * 8 + 4; float cz = gd * 8 + 4; block_triangles(cx,cy,cz, 8,8,8, helper.tinds[grid_idx]); } return create_octree(fit, fit_multiply, pack, n_threads, &helper); } protected: virtual void 
block_triangles(float cx, float cy, float cz, float vd, float vh, float vw, std::vector<int>& tinds) { for(int fidx = 0; fidx < n_faces; ++fidx) { float3 vx_c; vx_c.x = cx; vx_c.y = cy; vx_c.z = cz; float3 vx_w; vx_w.x = vw; vx_w.y = vh; vx_w.z = vd; float3 v0; v0.x = verts[faces[fidx * 3 + 0] * 3 + 0]; v0.y = verts[faces[fidx * 3 + 0] * 3 + 1]; v0.z = verts[faces[fidx * 3 + 0] * 3 + 2]; float3 v1; v1.x = verts[faces[fidx * 3 + 1] * 3 + 0]; v1.y = verts[faces[fidx * 3 + 1] * 3 + 1]; v1.z = verts[faces[fidx * 3 + 1] * 3 + 2]; float3 v2; v2.x = verts[faces[fidx * 3 + 2] * 3 + 0]; v2.y = verts[faces[fidx * 3 + 2] * 3 + 1]; v2.z = verts[faces[fidx * 3 + 2] * 3 + 2]; bool tria_inter = intersection_triangle_voxel(vx_c, vx_w, v0, v1, v2); if(tria_inter) { tinds.push_back(fidx); } } } virtual bool is_occupied(float cx, float cy, float cz, float vd, float vh, float vw, int gd, int gh, int gw, OctreeCreateHelperCpu* helper_) { OctreeCreateFromMeshHelperCpu* helper = dynamic_cast<OctreeCreateFromMeshHelperCpu*>(helper_); int grid_idx = helper->get_grid_idx(gd, gh, gw); std::vector<int>& tinds = helper->tinds[grid_idx]; for(size_t idx = 0; idx < tinds.size(); ++idx) { int fidx = tinds[idx]; float3 vx_c; vx_c.x = cx; vx_c.y = cy; vx_c.z = cz; float3 vx_w; vx_w.x = vw; vx_w.y = vh; vx_w.z = vd; float3 v0; v0.x = verts[faces[fidx * 3 + 0] * 3 + 0]; v0.y = verts[faces[fidx * 3 + 0] * 3 + 1]; v0.z = verts[faces[fidx * 3 + 0] * 3 + 2]; float3 v1; v1.x = verts[faces[fidx * 3 + 1] * 3 + 0]; v1.y = verts[faces[fidx * 3 + 1] * 3 + 1]; v1.z = verts[faces[fidx * 3 + 1] * 3 + 2]; float3 v2; v2.x = verts[faces[fidx * 3 + 2] * 3 + 0]; v2.y = verts[faces[fidx * 3 + 2] * 3 + 1]; v2.z = verts[faces[fidx * 3 + 2] * 3 + 2]; // printf("[%f,%f,%f] inter [%f,%f,%f], [%f,%f,%f], [%f,%f,%f]\n", // vx_c.x, vx_c.y, vx_c.z, // v0.x, v0.y, v0.z, // v1.x, v1.y, v1.z, // v2.x, v2.y, v2.z); bool tria_inter = intersection_triangle_voxel(vx_c, vx_w, v0, v1, v2); if(tria_inter) { // printf(" triang intersection 
at (%f,%f,%f)\n", vx_c.x,vx_c.y,vx_c.z); return true; } } return false; } virtual void get_data(bool oc, float cx, float cy, float cz, float vd, float vh, float vw, int gd, int gh, int gw, OctreeCreateHelperCpu* helper, ot_data_t* dst) { if(oc) { dst[0] = 1; } else { dst[0] = 0; } } void rescale() { float min_x = 1e9; float min_y = 1e9; float min_z = 1e9; float max_x = -1e9; float max_y = -1e9; float max_z = -1e9; for(int fidx = 0; fidx < n_faces; ++fidx) { for(int vidx = 0; vidx < 3; ++vidx) { min_x = FMIN(min_x, verts[faces[fidx * 3 + vidx] * 3 + 0]); min_y = FMIN(min_y, verts[faces[fidx * 3 + vidx] * 3 + 1]); min_z = FMIN(min_z, verts[faces[fidx * 3 + vidx] * 3 + 2]); max_x = FMAX(max_x, verts[faces[fidx * 3 + vidx] * 3 + 0]); max_y = FMAX(max_y, verts[faces[fidx * 3 + vidx] * 3 + 1]); max_z = FMAX(max_z, verts[faces[fidx * 3 + vidx] * 3 + 2]); } } // rescale vertices printf("bb before rescaling [%f,%f], [%f,%f], [%f,%f]\n", min_x, max_x, min_y, max_y, min_z, max_z); float src_width = FMAX(max_x - min_x, FMAX(max_y - min_y, max_z - min_z)); float dst_width = FMIN(depth - 2*pad, FMIN(height - 2*pad, width - 2*pad)); float o_ctr_x = (max_x + min_x)/2.f; float n_ctr_x = width/2.f; float o_ctr_y = (max_y + min_y)/2.f; float n_ctr_y = height/2.f; float o_ctr_z = (max_z + min_z)/2.f; float n_ctr_z = depth/2.f; for(int vidx = 0; vidx < n_verts; ++vidx) { verts[vidx * 3 + 0] = (verts[vidx * 3 + 0] - o_ctr_x) / src_width * dst_width + n_ctr_x; verts[vidx * 3 + 1] = (verts[vidx * 3 + 1] - o_ctr_y) / src_width * dst_width + n_ctr_y; verts[vidx * 3 + 2] = (verts[vidx * 3 + 2] - o_ctr_z) / src_width * dst_width + n_ctr_z; } printf("bb after rescaling [%f,%f], [%f,%f], [%f,%f]\n", (min_x - o_ctr_x) / src_width * dst_width + n_ctr_x, (max_x - o_ctr_x) / src_width * dst_width + n_ctr_x, (min_y - o_ctr_y) / src_width * dst_width + n_ctr_y, (max_y - o_ctr_y) / src_width * dst_width + n_ctr_y, (min_z - o_ctr_z) / src_width * dst_width + n_ctr_z, (max_z - o_ctr_z) / src_width * 
dst_width + n_ctr_z); } protected: const ot_size_t depth; const ot_size_t height; const ot_size_t width; ot_size_t n_verts; float* verts; ot_size_t n_faces; int* faces; int pad; }; #endif
zz2960ver2.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <omp.h>

#define MAX_BIN_NUM 50
#define MAX_THREAD_NUM 100

void print_help(char *executable);

/*
 * Parallel histogram counter: reads a count followed by that many doubles
 * (expected in [0, 100)) from a text file and counts them into b equal-width
 * bins using t threads.  Each thread accumulates a private histogram over its
 * contiguous slice and then merges it into the shared result.
 *
 * Fixes vs. the previous version:
 *  - the merge into the shared bin_counter was unsynchronized (data race);
 *    it is now done inside an omp critical section
 *  - bin_index is clamped so a value of exactly 100 (or out-of-range input)
 *    cannot index past the end of the array
 *  - the FILE* is closed on every early-exit path and nums is freed
 *  - the fopen failure message said "create" instead of "open"
 */
int main(int argc, char *argv[])
{
    /* Command line arguments processing */
    char *executable = argv[0];
    if (argc != 4)
    {
        printf("Error: invalid arguments\n\n");
        print_help(executable);
        return 1;
    }
    char *bin_count_str = argv[1];
    char *thread_count_str = argv[2];
    char *file_name = argv[3];

    /* Open input file */
    FILE *fp = fopen(file_name, "r");
    if (fp == NULL)
    {
        printf("Error: cannot open file %s\n", file_name);
        return 1;
    }

    int bin_count = atoi(bin_count_str);
    int thread_count = atoi(thread_count_str);
    if (bin_count <= 0 || bin_count > MAX_BIN_NUM)
    {
        printf("Error: invalid bin count %s\n", bin_count_str);
        fclose(fp);  /* do not leak the handle on early exit */
        return 1;
    }
    if (thread_count <= 0 || thread_count > MAX_THREAD_NUM)
    {
        printf("Error: invalid thread count %s\n", thread_count_str);
        fclose(fp);
        return 1;
    }

    /* Read num count in input file; reject garbage so malloc gets a sane size */
    int num_count = 0;
    if (fscanf(fp, "%d", &num_count) != 1 || num_count < 0)
    {
        printf("Error: invalid number count in file %s\n", file_name);
        fclose(fp);
        return 1;
    }

    /* Read in numbers (heap allocation: num_count may be large) */
    double *nums = (double *)malloc(num_count * sizeof(double));
    if (nums == NULL)
    {
        printf("Error: out of memory\n");
        fclose(fp);
        return 1;
    }
    for (int i = 0; i < num_count; i++)
    {
        if (fscanf(fp, "%lf", &nums[i]) != 1)
            nums[i] = 0.0;  /* missing/garbled entry: count it in bin 0 */
    }
    fclose(fp);

    /* Initialize the shared histogram (bin_count <= MAX_BIN_NUM, so a
       fixed-size array is always large enough and avoids a VLA) */
    int bin_counter[MAX_BIN_NUM];
    for (int i = 0; i < bin_count; i++)
    {
        bin_counter[i] = 0;
    }

    /* Range length of indexes for the numbers processed by each thread */
    int num_range_length = (int)ceil((double)num_count / thread_count);

    double start_time, finish_time;
    start_time = omp_get_wtime(); /* record start time */
#pragma omp parallel for num_threads(thread_count) \
    shared(bin_counter)
    for (int t = 0; t < thread_count; t++)
    {
        /* Private histogram for this thread */
        int local_bin_counter[MAX_BIN_NUM];
        for (int b = 0; b < bin_count; b++)
        {
            local_bin_counter[b] = 0;
        }

        /* Contiguous slice of nums assigned to this thread */
        int start = t * num_range_length;
        int end = start + num_range_length;
        for (int j = start; j < end && j < num_count; j++)
        {
            /* Map value in [0, 100) to a bin; clamp so 100.0 or bad input
               cannot write out of bounds */
            int bin_index = (int)(nums[j] * bin_count / 100.0);
            if (bin_index < 0)
                bin_index = 0;
            if (bin_index >= bin_count)
                bin_index = bin_count - 1;
            local_bin_counter[bin_index]++;
        }

        /* Merge into the shared histogram; without the critical section two
           threads could lose updates (read-modify-write race) */
#pragma omp critical
        {
            for (int b = 0; b < bin_count; b++)
            {
                bin_counter[b] += local_bin_counter[b];
            }
        }
    }
    finish_time = omp_get_wtime();

    /* Print out result */
    for (int i = 0; i < bin_count; i++)
    {
        printf("bin[%d]=%d\n", i, bin_counter[i]);
    }

    /* Print time statistics */
    printf("Parallel part finished in %lf sec.\n", finish_time - start_time);

    free(nums);
    return 0;
}

/* Prints usage information for the program. */
void print_help(char *executable)
{
    printf("usage: %s b t filename\n\n", executable);
    printf("A parallel version of histogram statistics counter where each thread is responsible for a subset of the numbers.\n\n");
    printf("positional arguments:\n");
    printf("  b            the number of bins, 0 < b <= %d\n", MAX_BIN_NUM);
    printf("  t            the number of threads, 0 < t <= %d\n", MAX_THREAD_NUM);
    printf("  filename     the name of the text file that contains the floating point numbers\n");
}
base.c
#define _DEFAULT_SOURCE #include <stdio.h> #include <unistd.h> #include <omp.h> int NITER=100; int N=100; int main(){ for(int i = 0; i < NITER; ++i){ float counter = 0.0; #pragma omp parallel for for(int j = 0; j < N; ++j){ // Useful work here counter = counter + 1; usleep(1); } } }
mpiCodeGenerator.h
/*
 * MPI code generation from OpenMP-like directives (ROSE compiler pass).
 *
 * V 0.2 using real frontend parser and dedicated OpenMP-like AST nodes for program representation
 * This is necessary to parse complex extended map clause with dist_data info.
 * The previous version's MPI_PragmaAttribute is no longer used.
 *
 * Liao 12/11/2015
 *
 * V 0.1
 * Parsing pragmas and generating MPI code from input sequential code
 * Pragma is OpenMP style, reusing OmpAttribute to store information
 * As a experiments, a lightweight recursive descendent parser is used to parse the pragmas
 * Liao 9/22/2015
 */
#ifndef MPI_Code_Generator_h
#define MPI_Code_Generator_h

#include <vector>
#include <string>

namespace MPI_Code_Generator {
  //------------ v 0.2 interface, expecting the extended ROSE frontend to parse and create OpenMP AST nodes
  // using -rose:openmp:ast_only command line option to active the frontend support

  //! Entry point of the v0.2 translation: lowers the OpenMP-like AST of a file to MPI
  void lower_xomp (SgSourceFile* file);

  //! Translate target device(mpi:master) begin ...
  void transMPIDeviceMaster (SgOmpTargetStatement * t_stmt);

  //! Translate a target parallel loop into its MPI equivalent
  void transOmpTargetParallelLoop (SgOmpForStatement* loop);

  //! Translate mapped scalars and arrays, return a reference distributed local array portion size, used for loop bound later.
  SgVariableDeclaration* transOmpMapVariables (SgOmpTargetStatement* );

  //! Translate a loop affected
  void transForLoop (SgForStatement* for_stmt, SgVariableDeclaration* local_size_decl);

  // convert a C data type into MPI type name
  std::string C2MPITypeName (SgType*);

  //! Create MPI_Bcast() function call for a single variable
  SgExprStatement* buildMPI_Bcast(SgVariableSymbol* var_sym, int source_rank_id, SgScopeStatement* insertion_scope);

  //! Create MPI_Barrier ();
  SgExprStatement* buildMPI_Barrier(SgScopeStatement* insertion_scope);

  //--------------- v 0.1 interface, no longer being used.
  class MPI_PragmaAttribute;
  //int generateMPI (SgSourceFile* sfile);

  //! A prototype parser for directives guiding MPI code generation
  void parsePragmas(SgSourceFile* sfile, std::vector <MPI_PragmaAttribute*>& MPI_Pragma_Attribute_List);

  //! Translate generated Pragma Attributes
  void translatePragmas (std::vector <MPI_PragmaAttribute*>& MPI_Pragma_Attribute_List);

  //! Setup MPI initialization
  void setupMPIInit(SgSourceFile* sfile);

  //! Setup MPI finalize
  void setupMPIFinalize(SgSourceFile* sfile);

  // pragma enum values.
  // For quick prototyping, we use AstAttributes instead of dedicated AST nodes for storing parsed results.
  enum mpi_pragma_enum {
    // for main function, what is the default semantics for code if no directives are present ?
    // run by all processes (spmd) vs. run only by master process, or must be explicitly declared ( device (mpi:all))
    // #pragma omp mpi_device_default(mpi:all|mpi:master|explicit)
    e_mpi_all,
    e_mpi_master,
    e_semantics_explicit,

    //#pragma omp mpi_device_default(mpi:all|mpi:master|explicit)
    pragma_mpi_device_default,

    //#pragma omp target device(mpi:all) begin
    pragma_mpi_device_all_begin,
    //#pragma omp target device(mpi:all) end
    pragma_mpi_device_all_end,

    // #pragma omp target device(mpi:master) begin
    pragma_mpi_device_master_begin,
    // #pragma omp target device(mpi:master) end
    pragma_mpi_device_master_end,

    // pragma omp target device(mpi:all) map ( dist_data)
    pragma_mpi_device_all_map_dist,

    //#pragma omp parallel for
    pragma_parallel_for,

    pragma_last
  };

  // Global settings for the code generation
  extern mpi_pragma_enum mpi_device_default_choice;

  //! AST attribute attached to a pragma declaration to carry its parsed kind
  //! (v0.1 mechanism; superseded by real AST nodes in v0.2).
  class MPI_PragmaAttribute: public AstAttribute {
  public:
    SgPragmaDeclaration* pragma_node;  // the associated AST node for pragma
    enum mpi_pragma_enum pragma_type;
    enum mpi_pragma_enum default_semantics;

    MPI_PragmaAttribute (SgPragmaDeclaration* n , mpi_pragma_enum p_type): pragma_node(n), pragma_type(p_type) {
      default_semantics = e_semantics_explicit;
    }

    // convert the attribute back to string format
    std::string toString();
  }; // end class

  // parse a single pragma declaration, internal use only
  extern AstAttribute* parse_MPI_Pragma (SgPragmaDeclaration* pragmaDecl);

  // parse pragmas in an input file
  void parsePragmas(SgSourceFile* sfile);

} // end namespace

#endif //MPI_Code_Generator_h
Interp1PrimFifthOrderCompactUpwind.c
/*! @file Interp1PrimFifthOrderCompactUpwind.c * @brief 5th order compact upwind scheme (component-wise application to vectors). * @author Debojyoti Ghosh */ #include <stdio.h> #include <basic.h> #include <arrayfunctions.h> #include <mathfunctions.h> #include <interpolation.h> #include <tridiagLU.h> #include <mpivars.h> #include <hypar.h> #ifdef with_omp #include <omp.h> #endif #undef _MINIMUM_GHOSTS_ /*! \def _MINIMUM_GHOSTS_ * Minimum number of ghost points required for this interpolation * method. */ #define _MINIMUM_GHOSTS_ 3 /*! @brief 5th order compact upwind reconstruction (component-wise) on a uniform grid Computes the interpolated values of the first primitive of a function \f${\bf f}\left({\bf u}\right)\f$ at the interfaces from the cell-centered values of the function using the fifth order compact upwind scheme on a uniform grid. The first primitive is defined as a function \f${\bf h}\left({\bf u}\right)\f$ that satisfies: \f{equation}{ {\bf f}\left({\bf u}\left(x\right)\right) = \frac{1}{\Delta x} \int_{x-\Delta x/2}^{x+\Delta x/2} {\bf h}\left({\bf u}\left(\zeta\right)\right)d\zeta, \f} where \f$x\f$ is the spatial coordinate along the dimension of the interpolation. This function computes the 5th order compact upwind numerical approximation \f$\hat{\bf f}_{j+1/2} \approx {\bf h}_{j+1/2}\f$ as: \f{align}{ \frac{3}{10}\hat{\bf f}_{j-1/2} + \frac{6}{10}\hat{\bf f}_{j+1/2} + \frac{1}{10}\hat{\bf f}_{j+3/2} = \frac{1}{30}{\bf f}_{j-1} + \frac{19}{30}{\bf f}_j + \frac{1}{3}{\bf f}_{j+1}. \f} The resulting tridiagonal system is solved using tridiagLU() (see also #TridiagLU, tridiagLU.h). \b Implementation \b Notes: + This method assumes a uniform grid in the spatial dimension corresponding to the interpolation. + The method described above corresponds to a left-biased interpolation. The corresponding right-biased interpolation can be obtained by reflecting the equations about interface j+1/2. 
+ The scalar interpolation method is applied to the vector function in a component-wise manner. + The function computes the interpolant for the entire grid in one call. It loops over all the grid lines along the interpolation direction and carries out the 1D interpolation along these grid lines. + Location of cell-centers and cell interfaces along the spatial dimension of the interpolation is shown in the following figure: @image html chap1_1Ddomain.png @image latex chap1_1Ddomain.eps width=0.9\textwidth \b Function \b arguments: Argument | Type | Explanation --------- | --------- | --------------------------------------------- fI | double* | Array to hold the computed interpolant at the grid interfaces. This array must have the same layout as the solution, but with \b no \b ghost \b points. Its size should be the same as u in all dimensions, except dir (the dimension along which to interpolate) along which it should be larger by 1 (number of interfaces is 1 more than the number of interior cell centers). fC | double* | Array with the cell-centered values of the flux function \f${\bf f}\left({\bf u}\right)\f$. This array must have the same layout and size as the solution, \b with \b ghost \b points. u | double* | The solution array \f${\bf u}\f$ (with ghost points). If the interpolation is characteristic based, this is needed to compute the eigendecomposition. For a multidimensional problem, the layout is as follows: u is a contiguous 1D array of size (nvars*dim[0]*dim[1]*...*dim[D-1]) corresponding to the multi-dimensional solution, with the following ordering - nvars, dim[0], dim[1], ..., dim[D-1], where nvars is the number of solution components (#HyPar::nvars), dim is the local size (#HyPar::dim_local), D is the number of spatial dimensions. x | double* | The grid array (with ghost points). This is used only by non-uniform-grid interpolation methods. 
For multidimensional problems, the layout is as follows: x is a contiguous 1D array of size (dim[0]+dim[1]+...+dim[D-1]), with the spatial coordinates along dim[0] stored from 0,...,dim[0]-1, the spatial coordinates along dim[1] stored along dim[0],...,dim[0]+dim[1]-1, and so forth. upw | int | Upwinding direction: if positive, a left-biased interpolant will be computed; if negative, a right-biased interpolant will be computed. If the interpolation method is central, then this has no effect. dir | int | Spatial dimension along which to interpolate (eg: 0 for 1D; 0 or 1 for 2D; 0,1 or 2 for 3D) s | void* | Solver object of type #HyPar: the following variables are needed - #HyPar::ghosts, #HyPar::ndims, #HyPar::nvars, #HyPar::dim_local. m | void* | MPI object of type #MPIVariables: this is needed only by compact interpolation method that need to solve a global implicit system across MPI ranks. uflag | int | A flag indicating if the function being interpolated \f${\bf f}\f$ is the solution itself \f${\bf u}\f$ (if 1, \f${\bf f}\left({\bf u}\right) \equiv {\bf u}\f$). \b Reference: + Ghosh, D., Baeder, J. 
D., Compact Reconstruction Schemes with Weighted ENO Limiting for Hyperbolic Conservation Laws, SIAM Journal on Scientific Computing, 34 (3), 2012, A1678–A1706, http://dx.doi.org/10.1137/110857659 */ int Interp1PrimFifthOrderCompactUpwind( double *fI, /*!< Array of interpolated function values at the interfaces */ double *fC, /*!< Array of cell-centered values of the function \f${\bf f}\left({\bf u}\right)\f$ */ double *u, /*!< Array of cell-centered values of the solution \f${\bf u}\f$ */ double *x, /*!< Grid coordinates */ int upw, /*!< Upwind direction (left or right biased) */ int dir, /*!< Spatial dimension along which to interpolation */ void *s, /*!< Object of type #HyPar containing solver-related variables */ void *m, /*!< Object of type #MPIVariables containing MPI-related variables */ int uflag /*!< Flag to indicate if \f$f(u) \equiv u\f$, i.e, if the solution is being reconstructed */ ) { HyPar *solver = (HyPar*) s; MPIVariables *mpi = (MPIVariables*) m; CompactScheme *compact= (CompactScheme*) solver->compact; TridiagLU *lu = (TridiagLU*) solver->lusolver; int sys,Nsys,d,v; _DECLARE_IERR_; int ghosts = solver->ghosts; int ndims = solver->ndims; int nvars = solver->nvars; int *dim = solver->dim_local; int *stride= solver->stride_with_ghosts; /* define some constants */ static const double one_third = 1.0/3.0, thirteen_by_sixty = 13.0/60.0, fortyseven_by_sixty = 47.0/60.0, twentyseven_by_sixty = 27.0/60.0, one_by_twenty = 1.0/20.0, one_by_thirty = 1.0/30.0, nineteen_by_thirty = 19.0/30.0, three_by_ten = 3.0/10.0, six_by_ten = 6.0/10.0, one_by_ten = 1.0/10.0; /* create index and bounds for the outer loop, i.e., to loop over all 1D lines along dimension "dir" */ int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims]; _ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] = 1; _ArrayCopy1D_(dim,bounds_inter,ndims); bounds_inter[dir] += 1; int N_outer; _ArrayProduct1D_(bounds_outer,ndims,N_outer); /* calculate total 
number of tridiagonal systems to solve */ _ArrayProduct1D_(bounds_outer,ndims,Nsys); Nsys *= nvars; /* Allocate arrays for tridiagonal system */ double *A = compact->A; double *B = compact->B; double *C = compact->C; double *R = compact->R; #pragma omp parallel for schedule(auto) default(shared) private(sys,d,index_outer,indexC,indexI) for (sys=0; sys < N_outer; sys++) { _ArrayIndexnD_(ndims,sys,bounds_outer,index_outer,0); _ArrayCopy1D_(index_outer,indexC,ndims); _ArrayCopy1D_(index_outer,indexI,ndims); for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) { int qm1,qm2,qm3,qp1,qp2,p; _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p); if (upw > 0) { indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1); qm3 = qm1 - 2*stride[dir]; qm2 = qm1 - stride[dir]; qp1 = qm1 + stride[dir]; qp2 = qm1 + 2*stride[dir]; } else { indexC[dir] = indexI[dir] ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1); qm3 = qm1 + 2*stride[dir]; qm2 = qm1 + stride[dir]; qp1 = qm1 - stride[dir]; qp2 = qm1 - 2*stride[dir]; } /* Defining stencil points */ double *fm3, *fm2, *fm1, *fp1, *fp2; fm3 = fC+qm3*nvars; fm2 = fC+qm2*nvars; fm1 = fC+qm1*nvars; fp1 = fC+qp1*nvars; fp2 = fC+qp2*nvars; if ( ((mpi->ip[dir] == 0 ) && (indexI[dir] == 0 )) || ((mpi->ip[dir] == mpi->iproc[dir]-1) && (indexI[dir] == dim[dir])) ) { /* Use 5th order upwind at the physical boundaries */ _ArraySetValue_((A+Nsys*indexI[dir]+sys*nvars),nvars,0.0) _ArraySetValue_((B+Nsys*indexI[dir]+sys*nvars),nvars,1.0) _ArraySetValue_((C+Nsys*indexI[dir]+sys*nvars),nvars,0.0) for (v=0; v<nvars; v++) { (R+Nsys*indexI[dir]+sys*nvars)[v] = one_by_thirty * fm3[v] - thirteen_by_sixty * fm2[v] + fortyseven_by_sixty * fm1[v] + twentyseven_by_sixty * fp1[v] - one_by_twenty * fp2[v]; } } else { /* Use 5th order upwind at the physical boundaries */ if (upw > 0) { _ArraySetValue_((A+Nsys*indexI[dir]+sys*nvars),nvars,three_by_ten); _ArraySetValue_((B+Nsys*indexI[dir]+sys*nvars),nvars,six_by_ten ); 
_ArraySetValue_((C+Nsys*indexI[dir]+sys*nvars),nvars,one_by_ten ); } else { _ArraySetValue_((C+Nsys*indexI[dir]+sys*nvars),nvars,three_by_ten); _ArraySetValue_((B+Nsys*indexI[dir]+sys*nvars),nvars,six_by_ten ); _ArraySetValue_((A+Nsys*indexI[dir]+sys*nvars),nvars,one_by_ten ); } for (v=0; v<nvars; v++) { (R+Nsys*indexI[dir]+sys*nvars)[v] = one_by_thirty * fm2[v] + nineteen_by_thirty * fm1[v] + one_third * fp1[v]; } } } } #ifdef serial /* Solve the tridiagonal system */ IERR tridiagLU(A,B,C,R,dim[dir]+1,Nsys,lu,NULL); CHECKERR(ierr); #else /* Solve the tridiagonal system */ /* all processes except the last will solve without the last interface to avoid overlap */ if (mpi->ip[dir] != mpi->iproc[dir]-1) { IERR tridiagLU(A,B,C,R,dim[dir] ,Nsys,lu,&mpi->comm[dir]); CHECKERR(ierr); } else { IERR tridiagLU(A,B,C,R,dim[dir]+1,Nsys,lu,&mpi->comm[dir]); CHECKERR(ierr); } /* Now get the solution to the last interface from the next proc */ double *sendbuf = compact->sendbuf; double *recvbuf = compact->recvbuf; MPI_Request req[2] = {MPI_REQUEST_NULL,MPI_REQUEST_NULL}; if (mpi->ip[dir]) for (d=0; d<Nsys; d++) sendbuf[d] = R[d]; if (mpi->ip[dir] != mpi->iproc[dir]-1) MPI_Irecv(recvbuf,Nsys,MPI_DOUBLE,mpi->ip[dir]+1,214,mpi->comm[dir],&req[0]); if (mpi->ip[dir]) MPI_Isend(sendbuf,Nsys,MPI_DOUBLE,mpi->ip[dir]-1,214,mpi->comm[dir],&req[1]); MPI_Waitall(2,&req[0],MPI_STATUS_IGNORE); if (mpi->ip[dir] != mpi->iproc[dir]-1) for (d=0; d<Nsys; d++) R[d+Nsys*dim[dir]] = recvbuf[d]; #endif /* save the solution to fI */ #pragma omp parallel for schedule(auto) default(shared) private(sys,d,index_outer,indexC,indexI) for (sys=0; sys < N_outer; sys++) { _ArrayIndexnD_(ndims,sys,bounds_outer,index_outer,0); _ArrayCopy1D_(index_outer,indexI,ndims); for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) { int p; _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p); _ArrayCopy1D_((R+sys*nvars+Nsys*indexI[dir]),(fI+nvars*p),nvars); } } return(0); }
helper_classes_for_constraint_builder.h
// ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Aditya Ghantasala // // #if !defined(AUXILIARY_GLOBAL_MASTER_SLAVE_RELATION) #define AUXILIARY_GLOBAL_MASTER_SLAVE_RELATION // System includes #include <vector> #include <unordered_map> // project includes #include "includes/define.h" #include "includes/dof.h" #include "includes/node.h" #include "includes/lock_object.h" namespace Kratos { namespace Internals { ///@name Internals Globals ///@{ ///@} ///@name Type Definitions ///@{ /// Geometric definitions typedef Node<3> NodeType; typedef Geometry<NodeType> GeometryType; /// Matrix and vector definition typedef Kratos::Matrix MatrixType; typedef Kratos::Vector VectorType; /// Indexes definition typedef IndexedObject::IndexType IndexType; typedef std::vector<IndexType> VectorIndexType; ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ /** * @brief this method checks if any of the nodes of the given rGeometry is marked SLAVE. * @param rGeometry The geometry to check for. */ bool HasSlaveNode(GeometryType& rGeometry) { for(auto& node : rGeometry) if (node.IsDefined(SLAVE)) return node.Is(SLAVE); return false; } /** * @brief This function resizes the given matrix and vector pair to the new size provided. * And Initializes the extra part added to zero. * @param rMatrix matrix to be resized * @param rVector vector to be resized * @param FinalSize the final size of the resized quantities. 
*/ void ResizeAndInitializeLocalMatrices(MatrixType& rMatrix, VectorType& rVector, IndexType FinalSize) { KRATOS_TRY // storing the initial matrix and vector and their properties KRATOS_ERROR_IF(rMatrix.size1() != rVector.size())<<"ResizeAndInitializeLocalMatrices :: Dimension of the matrix and vector passed are not the same !"<<std::endl; const IndexType initial_sys_size = rMatrix.size1(); MatrixType matrix(initial_sys_size, initial_sys_size); noalias(matrix) = rMatrix; VectorType vector(initial_sys_size); noalias(vector) = rVector; rMatrix.resize(FinalSize, FinalSize, false); rVector.resize(FinalSize, false); // reassigning the original part of the matrix for (IndexType m = 0; m < initial_sys_size; ++m) { for (IndexType n = 0; n < initial_sys_size; ++n) { rMatrix(m,n) = matrix(m,n); } rVector(m) = vector(m); } // Making the extra part of matrix zero for (IndexType m = initial_sys_size; m < FinalSize; ++m) { for (IndexType n = 0; n < FinalSize; ++n) { rMatrix(m, n) = 0.0; rMatrix(n, m) = 0.0; } rVector(m) = 0.0; } KRATOS_CATCH("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise::ResizeAndInitializeLocalMatrices failed .."); } ///@} ///@name Internals Classes ///@{ /** * @class AuxiliaryGlobalMasterSlaveConstraint * @ingroup KratosCore * @brief This class stores the information regarding the AuxiliaryGlobalMasterSlaveConstraint equation. * Naming convention is defined like this. (each object of this class will store one equation in the given form * * SlaveEquationId = w_1*MasterEquationId_1 + w_2*MasterEquationId_2 + ..... + w_n*MasterEquationId_n * * This stores the condensed form of the MasterSlaveConstraint objects into one object. if only one relation for a slave is added as * MasterSlaveConstraint then there will only be one entry for master for its corresponding AuxiliaryGlobalMasterSlaveConstraint. * Currently this class is designed to hold only one equation. There is only one unique object of this class for each slave. 
* * Future plan is to also make it possible to work with matrices (T) and vectors (for slave and master equation ids and constants) * * * IMPORTANT : This is not seen by the user. This is a helper data structure which is exists only in the builder and solver. * * @author Aditya Ghantasala */ class AuxiliaryGlobalMasterSlaveConstraint : public IndexedObject { public: ///@name Type Definitions ///@{ typedef IndexedObject BaseType; typedef Internals::IndexType IndexType; typedef Internals::MatrixType MatrixType; typedef Internals::VectorType VectorType; typedef std::vector<IndexType> EquationIdVectorType; /// Pointer definition of AuxiliaryGlobalMasterSlaveConstraint KRATOS_CLASS_POINTER_DEFINITION(AuxiliaryGlobalMasterSlaveConstraint); ///@} ///@name Life Cycle ///@{ /** * @brief Constructor of the class * @param SlaveEquationId the slave equation id for which this class is being constructed. */ explicit AuxiliaryGlobalMasterSlaveConstraint(IndexType SlaveEquationId = 0) : IndexedObject(SlaveEquationId), mLhsValue(0.0), mRhsValue(0.0) { } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /** * @brief Function to get the slave equation Id corresponding to this constraint. * @param Constant the value of the constant to be assigned. 
*/ IndexType SlaveEquationId() const { return this->Id(); } /** * @brief Function to set the lefthand side of the constraint (the slave dof value) * @param LhsValue the value of the lhs (the slave dof value) */ void SetLeftHandSide(const double LhsValue) { mLockObject.SetLock(); mLhsValue = LhsValue; mLockObject.UnSetLock(); } /** * @brief Function to update the righthand side of the constraint (the combination of all the master dof values and constants) * @param RHSValue the value of the lhs (the slave dof value) */ void SetRightHandSide(const double RhsValue) { mRhsValue = RhsValue; } void UpdateRightHandSide(const double RhsValueUpdate) { mLockObject.SetLock(); mRhsValue = mRhsValue + RhsValueUpdate; mLockObject.UnSetLock(); } // Get number of masters for this slave IndexType NumberOfMasters() const { return mMasterEquationIdVector.size(); } /** * @brief this determines the master equation IDs connected to this constraint * @param rResult the elemental equation ID vector */ virtual void EquationIdsVector(IndexType& rSlaveEquationId, EquationIdVectorType& rMasterEquationIds) { if (rMasterEquationIds.size() != mMasterEquationIdVector.size()) rMasterEquationIds.resize(this->NumberOfMasters(), false); rSlaveEquationId = this->SlaveEquationId(); rMasterEquationIds = mMasterEquationIdVector; } /** * @brief this is called during the assembling process in order * to calculate all elemental contributions to the global system * matrix and the right hand side * @param rMasterWeightsVector the elemental left hand side matrix * @param rConstant the elemental right hand side */ virtual void CalculateLocalSystem(VectorType &rMasterWeightsVector, double &rConstant) { if (rMasterWeightsVector.size() != this->NumberOfMasters()) rMasterWeightsVector.resize(this->NumberOfMasters(), false); for (IndexType i = 0; i < this->NumberOfMasters(); ++i) rMasterWeightsVector(i) = mMasterWeightsVector[i]; /// Here this is required because, when in the builder and solver , we are actually 
imposing the constraint on the update /// of the DOF value (residual formulation), this does not necessarily guarantee the DOFs themselves follow the constraint equation. /// So, we calculate the LHS value and RHS value of the constraint equation (with DOF values) and if they are not /// satisfying the constraint, we use the residual as the constant. rConstant = mRhsValue - mLhsValue; } /** * @brief This method clears the equations ids */ void Clear() { //clearing the contents mMasterEquationIdVector.clear(); mMasterWeightsVector.clear(); } /** * @brief This method adds a new master */ void AddMaster(const IndexType MasterEquationId, const double Weight) { const int index = GetMasterEquationIdPosition(MasterEquationId); if (index >= 0) { #pragma omp atomic mMasterWeightsVector[index] += Weight; } else { mLockObject.SetLock(); // locking for exclusive access to the vectors mMasterEquationIdVector and mMasterWeightsVectors mMasterEquationIdVector.push_back(MasterEquationId); mMasterWeightsVector.push_back(Weight); mLockObject.UnSetLock(); // unlocking } } /** * @brief This method resers the LHS/RHS relationship */ void Reset() { this->mLhsValue = 0.0; this->mRhsValue = 0.0; } /** * @brief This method returns the correspondin EquationId for the master */ int GetMasterEquationIdPosition(const IndexType MasterEquationId) const { auto it = find(mMasterEquationIdVector.begin(), mMasterEquationIdVector.end(), MasterEquationId); if (it != mMasterEquationIdVector.end()) return it - mMasterEquationIdVector.begin(); else return -1; } ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { std::stringstream buffer; buffer << "AuxiliaryGlobalMasterSlaveConstraint # " << this->Id(); return buffer.str(); } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. 
void PrintData(std::ostream& rOStream) const override { } private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ double mLhsValue; double mRhsValue; EquationIdVectorType mMasterEquationIdVector; std::vector<double> mMasterWeightsVector; LockObject mLockObject; ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Serialization ///@{ friend class Serializer; void save(Serializer &rSerializer) const override { // No need to save anything from this class as they will be reconstructed KRATOS_SERIALIZE_SAVE_BASE_CLASS(rSerializer, IndexedObject); } void load(Serializer &rSerializer) override { // No need to load anything from this class as they will be reconstructed KRATOS_SERIALIZE_LOAD_BASE_CLASS(rSerializer, IndexedObject); } ///@} }; // End of ConstraintEquation class /** * @struct LocalIndices * @ingroup KratosCore * @brief This class stores the stores three different vectors of local internal, slave, master indices * which are used in constraint builder and solver. * * @author Aditya Ghantasala */ struct LocalIndices { typedef Internals::IndexType IndexType; typedef Internals::VectorIndexType VectorIndexType; void Reset() { internal_index_vector.resize(0); master_index_vector.resize(0); slave_index_vector.resize(0); container_master_weights.resize(0); container_master_slaves.resize(0); processed_master_indices.resize(0); } VectorIndexType internal_index_vector; // indicies corresponding to internal DOFs VectorIndexType master_index_vector; // indicies corresponding to master DOFs VectorIndexType slave_index_vector; // indicies corresponding to slave DOFs std::vector<double> container_master_weights; // list of master weights in the order in which they are processed std::vector<IndexType> container_master_slaves; // list of slave indices corresponding to each master processed std::vector<IndexType> processed_master_indices; // list of master indices in the order in which they are processed. 
};

///@}
///@name Type Definitions
///@{

/// AuxiliaryGlobalMasterSlaveConstraint definitions
typedef Internals::AuxiliaryGlobalMasterSlaveConstraint AuxiliaryGlobalMasterSlaveConstraintType;
//typedef PointerVectorSet<AuxiliaryGlobalMasterSlaveConstraint, IndexedObject> GlobalMasterSlaveRelationContainerType;
typedef std::unordered_map< IndexType, unique_ptr< AuxiliaryGlobalMasterSlaveConstraintType > > GlobalMasterSlaveRelationContainerType;

///@}
///@name Internal Classes
///@{

/**
 * @class ConstraintImposer
 * @ingroup KratosCore
 * @brief Helper used by the builder and solver to impose master-slave constraints
 *        on element/condition local systems (equation ids, LHS and RHS).
 * @author Aditya Ghantasala
 */
template <class TSparseSpace, class TDenseSpace, class TLinearSolver >
// Made template to include the possibility to work with both local and global matrices for imposing the constraints.
class ConstraintImposer {
public:
    ///@name Type Definitions
    ///@{
    typedef Internals::AuxiliaryGlobalMasterSlaveConstraint AuxiliaryGlobalMasterSlaveRelationType;
    typedef std::unordered_map< IndexType, unique_ptr< AuxiliaryGlobalMasterSlaveRelationType > > GlobalMasterSlaveRelationContainerType;
    typedef std::vector<Dof<double>::Pointer> DofsVectorType;
    typedef typename TDenseSpace::MatrixType LocalSystemMatrixType;
    typedef typename TDenseSpace::VectorType LocalSystemVectorType;
    typedef Internals::LocalIndices LocalIndicesType;
    typedef Kratos::Matrix MatrixType;
    typedef Kratos::Vector VectorType;
    typedef std::vector<IndexType> VectorIndexType;
    typedef std::vector<IndexType> EquationIdVectorType;

    ///@}
    ///@name Life Cycle
    ///@{

    explicit ConstraintImposer(GlobalMasterSlaveRelationContainerType& rGlobalMasterSlaveRelations)
        : mrGlobalMasterSlaveConstraints(rGlobalMasterSlaveRelations)
    {
    }

    ~ConstraintImposer()
    {
    }

    ConstraintImposer( const ConstraintImposer &OtherObject) :
                mrGlobalMasterSlaveConstraints (OtherObject.mrGlobalMasterSlaveConstraints) // copy constructor
    {
    }

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /**
     * @brief This adds the equation IDs of masters of all the slaves corresponding to pCurrentElement to EquationIds
     * @details Here cannot use the pure Geometry because, we would need the dof list from the element/geometry.
     * @param rCurrentContainer the element or condition where the rEquationIds to be modified for master-slave constraints
     * @param rEquationIds the equation id vector for the above element or condition
     * @param rCurrentProcessInfo the current process info
     */
    template <typename TContainerType>
    void ApplyConstraints(TContainerType& rCurrentContainer,
                          typename TContainerType::EquationIdVectorType& rEquationIds,
                          ProcessInfo& rCurrentProcessInfo)
    {
        KRATOS_TRY
        this->Reset();
        // If no slave is found for this container, no need of going on
        if (! Internals::HasSlaveNode(rCurrentContainer.GetGeometry())) {
            return;
        }
        DofsVectorType ContainerDofs;
        rCurrentContainer.GetDofList(ContainerDofs, rCurrentProcessInfo);
        IndexType slave_equation_id;
        // For each dof, check if it is a slave; if it is, we extend the equation id list.
        for (IndexType j = 0; j < ContainerDofs.size(); j++) {
            slave_equation_id = ContainerDofs[j]->EquationId(); // consider everything as a slave.
            // Get the global constraint equation for this slave.
            auto global_master_slave_constraint = mrGlobalMasterSlaveConstraints.find(slave_equation_id);
            if (global_master_slave_constraint != mrGlobalMasterSlaveConstraints.end()) { // if an equation exists for this slave
                global_master_slave_constraint->second->EquationIdsVector(slave_equation_id, mMasterEquationIds); // get the slave and master equation ids for this slave.
                rEquationIds.reserve(mMasterEquationIds.size());
                for (auto &master_eq_id : mMasterEquationIds) {
                    // Add the current slave's master eq ids to the equation ids
                    rEquationIds.push_back(master_eq_id);
                }
            }
        }
        KRATOS_CATCH("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise::ApplyConstraints failed ..");
    }

    /**
     * @brief This function modifies the LHS and RHS of the rCurrentContainer to account for any master-slave constraints its nodes/dofs are carrying.
     * @details Here cannot use the pure Geometry because, we would need the dof list from the element/geometry.
     * @param rCurrentContainer the element or condition where the rEquationIds to be modified for master-slave constraints
     * @param rLHSContribution the LHS contribution of the rCurrentContainer
     * @param rRHSContribution the RHS contribution of the rCurrentContainer
     * @param rEquationIds the equation id vector for the above element or condition
     * @param rCurrentProcessInfo the current process info
     */
    template <typename TContainerType>
    void ApplyConstraints(TContainerType& rCurrentContainer,
                          LocalSystemMatrixType& rLHSContribution,
                          LocalSystemVectorType& rRHSContribution,
                          typename TContainerType::EquationIdVectorType& rEquationIds,
                          ProcessInfo& rCurrentProcessInfo)
    {
        KRATOS_TRY
        // If no slave is found for this container, no need of going on
        if (! Internals::HasSlaveNode(rCurrentContainer.GetGeometry()))
            return;
        this->Reset();
        // Saving the original system size
        const IndexType initial_sys_size = rLHSContribution.size1();

        // first fill in the rEquationIds using the above function (overloaded one)
        ApplyConstraints<TContainerType>(rCurrentContainer, rEquationIds, rCurrentProcessInfo); // now rEquationIds has all the slave equation ids appended to it.
        IndexType total_number_of_masters = rEquationIds.size() - initial_sys_size;
        // Calculating the local indices corresponding to internal, master, slave dofs of this container
        CalculateLocalIndices(rEquationIds, mLocalIndices, total_number_of_masters);
        // resizing the matrices to the new required length
        ResizeAndInitializeLocalMatrices(rLHSContribution, rRHSContribution, rEquationIds.size());

        // Calculating the F = T'*(F-K*g) which is local to this container
        ModifyRHSForConstraints(rLHSContribution, rRHSContribution, rEquationIds);
        // Calculating the K = T' * K *T which is local to this container
        ModifyLHSForConstraints(rLHSContribution, rRHSContribution, rEquationIds);
        KRATOS_CATCH("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise:: Applying Multipoint constraints failed ..");
    }

    ///@}

private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{
    GlobalMasterSlaveRelationContainerType& mrGlobalMasterSlaveConstraints;
    // For Formulating which are the internal, slave indices locally.
    LocalIndicesType mLocalIndices;
    // container's transformation matrix and constant vector
    MatrixType mTransformationMatrixLocal;
    VectorType mConstantVectorLocal;
    // containers for holding equation ids and container dofs
    EquationIdVectorType mMasterEquationIds;
    DofsVectorType mContainerDofs;

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    /**
     * @brief This function does two operations : K = T' * K * T . This operations are done in place.
     * Meaning that there is no memory duplication and no explicit matrix and matrix or matrix vector multiplication.
     * Individual entries of K and F are modified to achieve the result.
     * @param rLHSContribution The lhs matrix of the container
     * @param rRHSContribution The rhs vector of the container
     * @param rEquationIds the list of equation ids (extended with the masters).
     */
    void ModifyLHSForConstraints(MatrixType &rLHSContribution, VectorType& rRHSContribution, EquationIdVectorType &rEquationIds)
    {
        // Bookkeeping containers sized to the number of masters found for this container.
        mLocalIndices.container_master_weights.reserve(mLocalIndices.master_index_vector.size());
        mLocalIndices.container_master_slaves.reserve(mLocalIndices.master_index_vector.size());
        mLocalIndices.processed_master_indices.reserve(mLocalIndices.master_index_vector.size());
        IndexType slave_equation_id;
        EquationIdVectorType master_equation_ids;
        VectorType master_weights_vector;
        double slave_constant;

        for (auto& slave_index : mLocalIndices.slave_index_vector) { // Loop over all the slaves for this container
            // Get the global equation for this constraint
            auto global_master_slave_constraint = mrGlobalMasterSlaveConstraints.find(rEquationIds[slave_index]);
            // Get the transformation matrix and constant_vector from the current slave
            global_master_slave_constraint->second->EquationIdsVector(slave_equation_id, master_equation_ids);
            global_master_slave_constraint->second->CalculateLocalSystem(master_weights_vector, slave_constant);

            IndexType master_index = 0;
            double master_weight = 0.0;
            IndexType i_master = 0;
            for (auto& master_eq_id : master_equation_ids) { // Loop over all the masters the slave has
                // Position of this master's column/row in the (extended) local system.
                master_index = std::distance(rEquationIds.begin(), std::find(rEquationIds.begin(), rEquationIds.end(), master_eq_id));
                //master_weight = mTransformationMatrixLocal(slave_index,master_index);
                master_weight = master_weights_vector(i_master);
                for (auto& internal_index : mLocalIndices.internal_index_vector) {
                    // For K(m,u) and K(u,m): fold the slave coupling into the master row/column.
                    rLHSContribution(internal_index, master_index) += rLHSContribution(internal_index, slave_index) * master_weight;
                    rLHSContribution(master_index, internal_index) += rLHSContribution(slave_index, internal_index) * master_weight;
                }
                // Remember (weight, slave, master) so the K(m,m) contributions can be added
                // after ALL slave couplings have been processed.
                mLocalIndices.container_master_weights.push_back( master_weight );
                mLocalIndices.container_master_slaves.push_back( slave_index );
                mLocalIndices.processed_master_indices.push_back( master_index );
                i_master++;
            } // Loop over all the masters the slave has
        }

        // Adding contribution from slave to Kmm: K(m,m') += w * K(s,s') * w'
        IndexType master_i = 0;
        for (auto& master_index : mLocalIndices.processed_master_indices) {
            IndexType master_i_other = 0;
            for (auto& master_index_other : mLocalIndices.processed_master_indices) {
                rLHSContribution(master_index, master_index_other) += mLocalIndices.container_master_weights[master_i] *
                    rLHSContribution(mLocalIndices.container_master_slaves[master_i], mLocalIndices.container_master_slaves[master_i_other])
                    * mLocalIndices.container_master_weights[master_i_other];
                master_i_other++;
            }
            master_i++;
        }

        // For K(u,s) and K(s,u): zero the slave rows/columns. This is to be done at the end only,
        // after they have been folded into the master entries above.
        for (auto& slave_index : mLocalIndices.slave_index_vector) {
            for (auto& internal_index : mLocalIndices.internal_index_vector) {
                rLHSContribution(slave_index, internal_index) = 0.0;
                rLHSContribution(internal_index, slave_index) = 0.0;
            }
        }
    }

    /**
     * @brief This function does two operations : F = T'*(F-K*b). This operation is done in place.
     * Meaning that there is no memory duplication and no explicit matrix and matrix or matrix vector multiplication.
     * Individual entries of K and F are modified to achieve the result.
     * @param rLHSContribution The lhs matrix of the container
     * @param rRHSContribution The rhs vector of the container
     * @param rEquationIds the list of equation ids (extended with the masters).
*/
void ModifyRHSForConstraints(MatrixType &rLHSContribution, VectorType& rRHSContribution, EquationIdVectorType &rEquationIds)
{
    IndexType slave_equation_id;
    EquationIdVectorType master_equation_ids;
    VectorType master_weights_vector;
    double slave_constant;
    VectorType master_weights_vector_other;
    double constant_other;

    for (auto& slave_index : mLocalIndices.slave_index_vector) { // Loop over all the slaves for this container
        // Get the global equation for this constraint.
        // NOTE(review): dereferenced below without an end() check — assumes the
        // constraint map still contains every id in slave_index_vector.
        auto global_master_slave_constraint = mrGlobalMasterSlaveConstraints.find(rEquationIds[slave_index]);
        // Get the transformation weights and the constant from the current slave's constraint
        global_master_slave_constraint->second->EquationIdsVector(slave_equation_id, master_equation_ids);
        global_master_slave_constraint->second->CalculateLocalSystem(master_weights_vector, slave_constant);
        IndexType master_index = 0;
        double master_weight = 0.0;
        IndexType i_master = 0;
        for (auto& master_eq_id : master_equation_ids) { // Loop over all the masters the slave has
            // Local index of this master in rEquationIds. NOTE(review): no
            // end()-check on std::find — see ModifyLHSForConstraints; assumes
            // rEquationIds is already extended with the master ids.
            master_index = std::distance(rEquationIds.begin(), std::find(rEquationIds.begin(), rEquationIds.end(), master_eq_id));
            //master_weight = mTransformationMatrixLocal(slave_index,master_index);
            master_weight = master_weights_vector(i_master);
            // RHS(u) -= K(u,s)*b (the F - K*b part for the internal DoFs).
            // NOTE(review): this inner loop does not depend on master_weight
            // yet sits inside the master loop, so the K(u,s)*b correction is
            // applied once per master of this slave — verify this repetition
            // is intended rather than a loop-nesting slip.
            for (auto& internal_index : mLocalIndices.internal_index_vector) {
                rRHSContribution(internal_index) -= rLHSContribution(internal_index, slave_index) * slave_constant;
            }
            // For RHS(m) += A'*LHS(s,s)*B : subtract w * K(s,s') * b' over all
            // other slaves of this container (each re-queried for its constant).
            for (auto& slave_index_other : mLocalIndices.slave_index_vector) {
                auto global_master_slave_constraint_other = mrGlobalMasterSlaveConstraints.find(rEquationIds[slave_index_other]);
                global_master_slave_constraint_other->second->CalculateLocalSystem(master_weights_vector_other, constant_other);
                rRHSContribution(master_index) -= rLHSContribution(slave_index, slave_index_other) * master_weight * constant_other;
            }
            // Changing the RHS side of the equation: fold the slave residual
            // into its master, RHS(m) += w * RHS(s).
            rRHSContribution(master_index) += master_weight * rRHSContribution(slave_index);
            i_master++;
        } // Loop over all the masters the slave has

        // Zero the slave entry only after every master has consumed it.
        rRHSContribution(slave_index) = 0.0;
    }
}

/**
 * @brief Resets the member vectors and matrices to zero and zero size
 */
void Reset()
{
    mLocalIndices.Reset();
    mTransformationMatrixLocal.resize(0,0, false);
    mConstantVectorLocal.resize(0, false);
    mMasterEquationIds.clear();
    mContainerDofs.clear();
}

/**
 * @brief This function calculates the local indices of a given element or condition
 *        (slave, internal and master index vectors, in that order — the internal
 *        computation consumes the slave vector filled by the first call).
 * @param rEquationIds vector of the equation ids
 * @param rLocalIndexStructure reference to the structure of LocalIndicesType
 * @param rTotalNumberOfMasters total number of masters for this container
 */
void CalculateLocalIndices(EquationIdVectorType& rEquationIds, LocalIndicesType& rLocalIndexStructure, IndexType rTotalNumberOfMasters)
{
    CalculateLocalSlaveIndices(rEquationIds, rLocalIndexStructure);
    CalculateLocalInternalIndices(rEquationIds, rLocalIndexStructure);
    CalculateLocalMasterIndices(rEquationIds, rLocalIndexStructure, rTotalNumberOfMasters);
}

/**
 * @brief This function calculates the local slave indices of a given element or condition:
 *        every position in rEquationIds whose equation id has a registered
 *        global master-slave constraint is recorded as a slave index.
 * @param rEquationIds vector of the equation ids
 * @param rLocalIndexStructure reference to the structure of LocalIndicesType
 */
void CalculateLocalSlaveIndices(EquationIdVectorType& rEquationIds, LocalIndicesType& rLocalIndexStructure)
{
    KRATOS_TRY
    int index = 0;
    for (auto &eq_id : rEquationIds) {
        auto global_master_slave_constraint = mrGlobalMasterSlaveConstraints.find(eq_id);
        if (global_master_slave_constraint != mrGlobalMasterSlaveConstraints.end())
            rLocalIndexStructure.slave_index_vector.push_back(index);

        index++;
    }
    KRATOS_CATCH("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise::CalculateLocalSlaveIndices failed ..");
}

/**
 * @brief This function calculates the local internal indices of a given element or condition
 *        as the set difference {0..n-1} \ slave_index_vector.
 *        Side effect: sorts rLocalIndexStructure.slave_index_vector in place
 *        (required by std::set_difference).
 * @param rEquationIds vector of the equation ids
 * @param rLocalIndexStructure reference to the structure of LocalIndicesType
 */
void CalculateLocalInternalIndices(EquationIdVectorType& rEquationIds, LocalIndicesType& rLocalIndexStructure)
{
    KRATOS_TRY
    VectorIndexType local_index_vector(rEquationIds.size());
    for (IndexType i = 0; i<rEquationIds.size(); ++i)
        local_index_vector[i] = i;

    // local_index_vector is filled in ascending order, so this sort is a
    // no-op kept for safety; the slave vector sort, however, is required.
    std::sort(local_index_vector.begin(), local_index_vector.end());
    std::sort(rLocalIndexStructure.slave_index_vector.begin(), rLocalIndexStructure.slave_index_vector.end());

    std::set_difference(local_index_vector.begin(), local_index_vector.end(),
                        rLocalIndexStructure.slave_index_vector.begin(), rLocalIndexStructure.slave_index_vector.end(),
                        std::back_inserter(rLocalIndexStructure.internal_index_vector));
    KRATOS_CATCH("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise::CalculateLocalInternalIndices failed ..");
}

/**
 * @brief This function calculates the local master indices of a given element or condition.
 * @param rEquationIds vector of the equation ids
 * @param rLocalIndexStructure reference to the structure of LocalIndicesType
 * @param rTotalNumberOfMasters total number of masters for the given element or condition.
 */
void CalculateLocalMasterIndices(EquationIdVectorType& rEquationIds, LocalIndicesType& rLocalIndexStructure, IndexType rTotalNumberOfMasters)
{
    // Get number of master indices for this current container
    rLocalIndexStructure.master_index_vector.reserve(rTotalNumberOfMasters + rEquationIds.size() );
    // NOTE(review): with an unsigned IndexType this loop condition looks
    // inverted — starting at i = size()-1, the test
    // i < size() - rTotalNumberOfMasters is false from the first iteration
    // whenever rTotalNumberOfMasters >= 1, so no index is ever pushed (and
    // for rTotalNumberOfMasters == 0 it pushes ALL indices in descending
    // order). The intent was presumably to collect the last
    // rTotalNumberOfMasters positions of rEquationIds; also beware unsigned
    // underflow if rTotalNumberOfMasters > rEquationIds.size(). TODO confirm
    // against the callers before changing.
    for (IndexType i = rEquationIds.size()-1; i < rEquationIds.size() -rTotalNumberOfMasters; --i)
        rLocalIndexStructure.master_index_vector.push_back(i);
}

///@}

};

} // namespace Internals

} // namespace Kratos

#endif // CONSTRAINT_SLAVE_H_INCLUDED