source
stringlengths
3
92
c
stringlengths
26
2.25M
vednnActivationBackward.c
#include "vednnActivationBackward.h"
#include "vednn-def.h"
#include <stdio.h>
#include <stdint.h>

/* Dispatch helper: run an activation-backward kernel either directly or,
 * when OpenMP is enabled and more than one thread is configured, with the
 * element range split evenly across the threads. */
static inline vednnError_t vednnActivationBackward_wrapper(
    vednnActivationBackward_t pFunc,
    VEDNN_ACTIVATIONBKW_ARGS )
{
#ifndef VEDNN_USE_OPENMP
  return pFunc(VEDNN_ACTIVATIONBKW_ARGS_LIST);
#else
  if (__vednn_omp_num_threads == 1) {
    return pFunc(VEDNN_ACTIVATIONBKW_ARGS_LIST);
  }

  vednnError_t rc = VEDNN_SUCCESS;
#pragma omp parallel reduction(|:rc)
  {
    const int64_t nthreads = omp_get_num_threads();
    const int64_t threadid = omp_get_thread_num();

    /* Even partition of nElements; the first `remain` threads take one
     * extra element each, so every element is covered exactly once. */
    const int64_t eachNElement = nElements / nthreads;
    const int64_t remain       = nElements % nthreads;
    const int64_t elementBegin =
        eachNElement * threadid + (threadid < remain ? threadid : remain);
    const int64_t myElement =
        eachNElement + (threadid < remain ? 1 : 0);

    if (myElement == 0) {
      rc |= VEDNN_SUCCESS;
    }
    else {
      float *myGradOut = ((float *)pDataGradOut) + elementBegin;
      float *myIn      = ((float *)pDataIn)      + elementBegin;
      float *myGradIn  = ((float *)pDataGradIn)  + elementBegin;
      rc |= pFunc((void *)myGradOut, (void *)myIn, (void *)myGradIn,
                  myElement);
    }
  }
  return rc;
#endif /* VEDNN_USE_OPENMP */
}

/* ------------------------- public API ---------------------------------- */
vednnError_t vednnActivationBackward(
    const vednnActivationMode_t mode,
    VEDNN_ACTIVATIONBKW_ARGS)
{
#define OMPWRAP( IMPL ) WRAP_RET(IMPL, \
    vednnActivationBackward_wrapper, VEDNN_ACTIVATIONBKW_ARGS_LIST)

  switch (mode) {
  case VEDNN_ACTIVATION_RELU : OMPWRAP( vednnActivationBackward_Relu );
  }
  fprintf(stderr, "VEDNN Error : vednnActivationBackward : Invalid Parameter !!\n") ;
  return VEDNN_ERROR_INVALID_PARAM ;
#undef OMPWRAP
}
// vim: et sw=2 ts=2
util.h
#ifndef CORE_UTIL_H_
#define CORE_UTIL_H_

#include <math.h>
#include <stdint.h>  // int64_t used by negmod below (was relying on transitive includes)
#include <omp.h>
#include <cstdio>

// For GCC: CUDA execution-space qualifiers expand to nothing off-device.
#ifndef __host__
#define __host__
#endif
#ifndef __device__
#define __device__
#endif

// From
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomic-functions
// With help from https://stackoverflow.com/a/39287554/3427580
//
// Native double-precision atomicAdd only exists on compute capability >= 6.0;
// emulate it with an atomicCAS loop on older devices.
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600
static __inline__ __device__ double atomicAdd(double *address, double val) {
  unsigned long long int *address_as_ull = (unsigned long long int *)address;
  unsigned long long int old = *address_as_ull, assumed;
  // Adding 0.0 is a no-op: skip the CAS loop entirely.
  if (val == 0.0) {
    return __longlong_as_double(old);
  }
  do {
    assumed = old;
    old = atomicCAS(address_as_ull, assumed,
                    __double_as_longlong(val + __longlong_as_double(assumed)));
  } while (assumed != old);
  return __longlong_as_double(old);
}
#endif

// Atomically add `val` to `*address`: CUDA atomicAdd on device builds,
// an OpenMP atomic update otherwise.
template <typename T>
__host__ __device__ inline void atomic_add(T *address, T val) {
#ifdef __CUDACC__
  // CUDA versions of atomic add
  atomicAdd(address, val);
#else
  // C++ version of atomic add
#pragma omp atomic
  *address += val;
#endif
}

// Floating-point modulus whose result is non-negative for rval > 0
// (plain fmod takes the sign of lval).
template <typename T>
__host__ __device__ inline const T fnegmod(const T lval, const T rval) {
  return fmod(fmod(lval, rval) + rval, rval);
}

// Integer modulus whose result is non-negative for rval > 0
// (C's % takes the sign of lval).
__host__ __device__ inline int64_t negmod(const int64_t lval,
                                          const int64_t rval) {
  return ((lval % rval) + rval) % rval;
}

#endif  // CORE_UTIL_H_
condense.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdlib.h> #include <math.h> #define MIN(X,Y) ((X)<(Y) ? (X) : (Y)) #define MAX(X,Y) ((X)>(Y) ? (X) : (Y)) /* * def condense(op, a, loc): * nd = loc[-1] * out = numpy.empty((nd,nd)) * for i,i0 in enumerate(loc): * i1 = loc[i+1] * for j,j0 in enumerate(loc): * j1 = loc[j+1] * out[i,j] = op(a[i0:i1,j0:j1]) * return out */ void NPcondense(double (*op)(double *, int, int, int), double *out, double *a, int *loc_x, int *loc_y, int nloc_x, int nloc_y) { const size_t nj = loc_y[nloc_y]; const size_t Nloc_y = nloc_y; #pragma omp parallel { int i, j, i0, j0, di, dj; #pragma omp for for (i = 0; i < nloc_x; i++) { i0 = loc_x[i]; di = loc_x[i+1] - i0; for (j = 0; j < nloc_y; j++) { j0 = loc_y[j]; dj = loc_y[j+1] - j0; out[i*Nloc_y+j] = op(a+i0*nj+j0, nj, di, dj); } } } } double NP_sum(double *a, int nd, int di, int dj) { int i, j; double out = 0; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out += a[i*nd+j]; } } return out; } double NP_max(double *a, int nd, int di, int dj) { int i, j; double out = a[0]; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out = MAX(out, a[i*nd+j]); } } return out; } double NP_min(double *a, int nd, int di, int dj) { int i, j; double out = a[0]; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out = MIN(out, a[i*nd+j]); } } return out; } double NP_abssum(double *a, int nd, int di, int dj) { int i, j; 
double out = 0; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out += fabs(a[i*nd+j]); } } return out; } double NP_absmax(double *a, int nd, int di, int dj) { int i, j; double out = fabs(a[0]); for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out = MAX(out, fabs(a[i*nd+j])); } } return out; } double NP_absmin(double *a, int nd, int di, int dj) { int i, j; double out = fabs(a[0]); for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out = MIN(out, fabs(a[i*nd+j])); } } return out; } double NP_norm(double *a, int nd, int di, int dj) { int i, j; double out = 0; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out += a[i*nd+j] * a[i*nd+j]; } } return sqrt(out); }
core_ctradd.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_ztradd.c, normal z -> c, Fri Sep 28 17:38:23 2018
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_internal.h"
#include "plasma_types.h"
#include "core_lapack.h"

/***************************************************************************//**
 *
 * @ingroup core_tradd
 *
 * Performs an addition of two trapezoidal matrices similarly to the
 * pctradd() function from the PBLAS library:
 *
 *    \f[ B = \alpha * op( A ) + \beta * B, \f]
 *
 * where op( X ) is one of X, X^T, or X^H; alpha and beta are scalars; and
 * A, B are matrices with op( A ) an m-by-n or n-by-m matrix depending on
 * the value of transa and B an m-by-n matrix.
 *
 * @param[in] uplo
 *          PlasmaUpper or PlasmaLower: op( A ) and B are upper or lower
 *          trapezoidal matrices.
 *
 * @param[in] transa
 *          PlasmaNoTrans (op( A ) = A), PlasmaTrans (op( A ) = A^T), or
 *          PlasmaConjTrans (op( A ) = A^H).
 *
 * @param[in] m
 *          Number of rows of the matrices op( A ) and B.  m >= 0.
 *
 * @param[in] n
 *          Number of columns of the matrices op( A ) and B.  n >= 0.
 *
 * @param[in] alpha
 *          Scalar factor of A.
 *
 * @param[in] A
 *          Matrix of size lda-by-k, where k is n when
 *          transa == PlasmaNoTrans and m otherwise.
 *
 * @param[in] lda
 *          Leading dimension of the array A.  lda >= max(1,l), where l is m
 *          when transa = PlasmaNoTrans and n otherwise.
 *
 * @param[in] beta
 *          Scalar factor of B.
 *
 * @param[in,out] B
 *          Matrix of size ldb-by-n.
 *          On exit, B = alpha * op( A ) + beta * B.
 *
 * @param[in] ldb
 *          Leading dimension of the array B.  ldb >= max(1,m).
 *
 ******************************************************************************/
__attribute__((weak))
int plasma_core_ctradd(plasma_enum_t uplo, plasma_enum_t transa,
                       int m, int n,
                       plasma_complex32_t alpha,
                       const plasma_complex32_t *A, int lda,
                       plasma_complex32_t beta,
                       plasma_complex32_t *B, int ldb)
{
    // Check input arguments (same error codes and messages as before).
    if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) {
        plasma_coreblas_error("illegal value of uplo");
        return -1;
    }
    if ((transa != PlasmaNoTrans) &&
        (transa != PlasmaTrans)   &&
        (transa != PlasmaConjTrans)) {
        plasma_coreblas_error("illegal value of transa");
        return -2;
    }
    if (m < 0) {
        plasma_coreblas_error("illegal value of m");
        return -3;
    }
    if (n < 0) {
        plasma_coreblas_error("illegal value of n");
        return -4;
    }
    if (A == NULL) {
        plasma_coreblas_error("NULL A");
        return -6;
    }
    if ((transa == PlasmaNoTrans && lda < imax(1, m) && m > 0) ||
        (transa != PlasmaNoTrans && lda < imax(1, n) && n > 0)) {
        plasma_coreblas_error("illegal value of lda");
        return -7;
    }
    if (B == NULL) {
        plasma_coreblas_error("NULL B");
        return -9;
    }
    if (ldb < imax(1, m) && (m > 0)) {
        plasma_coreblas_error("illegal value of ldb");
        return -10;
    }

    // quick return
    if (m == 0 || n == 0 || (alpha == 0.0 && beta == 1.0))
        return PlasmaSuccess;

    // Single pass over the columns of B.  The updated row range of column j
    // is [j, m) in the lower-trapezoidal case and [0, min(j+1, m)) in the
    // upper-trapezoidal case; this folds the original two uplo branches
    // into one loop while updating exactly the same entries.
    const int lower = (uplo == PlasmaLower);
    for (int j = 0; j < n; j++) {
        const int ibeg = lower ? j : 0;
        const int iend = lower ? m : imin(j+1, m);
        switch (transa) {
        case PlasmaConjTrans:
            for (int i = ibeg; i < iend; i++)
                B[ldb*j+i] = beta * B[ldb*j+i] + alpha * conjf(A[lda*i+j]);
            break;
        case PlasmaTrans:
            for (int i = ibeg; i < iend; i++)
                B[ldb*j+i] = beta * B[ldb*j+i] + alpha * A[lda*i+j];
            break;
        case PlasmaNoTrans:
        default:
            for (int i = ibeg; i < iend; i++)
                B[ldb*j+i] = beta * B[ldb*j+i] + alpha * A[lda*j+i];
            break;
        }
    }

    return PlasmaSuccess;
}

/******************************************************************************/
// Task-based wrapper: schedules plasma_core_ctradd as an OpenMP task with
// the proper in/inout data dependences.
void plasma_core_omp_ctradd(
    plasma_enum_t uplo, plasma_enum_t transa,
    int m, int n,
    plasma_complex32_t alpha, const plasma_complex32_t *A, int lda,
    plasma_complex32_t beta,        plasma_complex32_t *B, int ldb,
    plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Extent of A's dependence range: lda-by-n if not transposed, lda-by-m
    // otherwise.
    int k = (transa == PlasmaNoTrans) ? n : m;

    #pragma omp task depend(in:A[0:lda*k]) \
                     depend(inout:B[0:ldb*n])
    {
        if (sequence->status == PlasmaSuccess) {
            int retval = plasma_core_ctradd(uplo, transa,
                                            m, n,
                                            alpha, A, lda,
                                            beta, B, ldb);
            if (retval != PlasmaSuccess) {
                plasma_error("core_ctradd() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
primes.c
/*
 * primes.c: Example of prime numbers counting in OpenMP.
 *
 * (C) 2015 Mikhail Kurnosov <mkurnosov@gmail.com>
 */

#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>

const int a = 1;
const int b = 10000000;

/* wtime: Wall-clock time in seconds. */
double wtime()
{
    struct timeval t;
    gettimeofday(&t, NULL);
    return (double)t.tv_sec + (double)t.tv_usec * 1E-6;
}

/*
 * is_prime_number: Returns 1 if n is a prime number and 0 otherwise.
 * This function uses trial division primality test.
 */
int is_prime_number(int n)
{
    int limit = sqrt(n) + 1;
    for (int i = 2; i <= limit; i++) {
        if (n % i == 0)
            return 0;
    }
    return (n > 1) ? 1 : 0;
}

/*
 * count_prime_numbers: Number of primes in [a, b], serial version.
 */
int count_prime_numbers(int a, int b)
{
    int nprimes = 0;

    /* Fix: an empty range or a range entirely below 2 contains no primes.
     * The old code counted '2' whenever a <= 2, even when b < 2. */
    if (b < 2 || b < a)
        return 0;

    /* Count '2' as a prime number */
    if (a <= 2) {
        nprimes = 1;
        a = 2;
    }
    /* Shift 'a' to odd number */
    if (a % 2 == 0)
        a++;

    /* Loop over odd numbers: a, a + 2, a + 4, ... , b */
    for (int i = a; i <= b; i += 2) {
        if (is_prime_number(i))
            nprimes++;
    }
    return nprimes;
}

/*
 * count_prime_numbers_omp: Number of primes in [a, b], OpenMP version.
 * Each thread tests a share of the odd candidates and accumulates a local
 * count; the local counts are merged with an atomic update.
 */
int count_prime_numbers_omp(int a, int b)
{
    int nprimes = 0;

    /* Same fix as the serial version: no primes below 2 or in an
     * empty range. */
    if (b < 2 || b < a)
        return 0;

    /* Count '2' as a prime number */
    if (a <= 2) {
        nprimes = 1;
        a = 2;
    }
    /* Shift 'a' to odd number */
    if (a % 2 == 0)
        a++;

#pragma omp parallel
    {
        double t = omp_get_wtime();
        int nloc = 0;

        /* Loop over odd numbers: a, a + 2, a + 4, ... , b */
#pragma omp for nowait
        for (int i = a; i <= b; i += 2) {
            if (is_prime_number(i))
                nloc++;
        }

        /* 'nowait' disables barrier after for */
#pragma omp atomic
        nprimes += nloc;

        t = omp_get_wtime() - t;
        printf("Thread %d execution time: %.6f sec.\n", omp_get_thread_num(), t);
    }
    return nprimes;
}

/* run_serial: Time the serial count; returns elapsed seconds. */
double run_serial()
{
    double t = wtime();
    int n = count_prime_numbers(a, b);
    t = wtime() - t;
    printf("Result (serial): %d\n", n);
    return t;
}

/* run_parallel: Time the OpenMP count; returns elapsed seconds. */
double run_parallel()
{
    double t = wtime();
    int n = count_prime_numbers_omp(a, b);
    t = wtime() - t;
    printf("Result (parallel): %d\n", n);
    return t;
}

int main(int argc, char **argv)
{
    printf("Count prime numbers on [%d, %d]\n", a, b);
    double tserial = run_serial();
    double tparallel = run_parallel();
    printf("Execution time (serial): %.6f\n", tserial);
    printf("Execution time (parallel): %.6f\n", tparallel);
    printf("Speedup: %.2f\n", tserial / tparallel);
    return 0;
}
GB_binop__iseq_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__iseq_int16 // A.*B function (eWiseMult): GB_AemultB__iseq_int16 // A*D function (colscale): GB_AxD__iseq_int16 // D*A function (rowscale): GB_DxB__iseq_int16 // C+=B function (dense accum): GB_Cdense_accumB__iseq_int16 // C+=b function (dense accum): GB_Cdense_accumb__iseq_int16 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__iseq_int16 // C=scalar+B GB_bind1st__iseq_int16 // C=scalar+B' GB_bind1st_tran__iseq_int16 // C=A+scalar GB_bind2nd__iseq_int16 // C=A'+scalar GB_bind2nd_tran__iseq_int16 // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define 
GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x == y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISEQ || GxB_NO_INT16 || GxB_NO_ISEQ_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__iseq_int16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__iseq_int16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__iseq_int16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__iseq_int16 ( GrB_Matrix C, const GrB_Matrix A, 
bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__iseq_int16 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__iseq_int16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__iseq_int16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT 
TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__iseq_int16 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t bij = Bx [p] ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__iseq_int16 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t aij = Ax [p] ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] 
; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB_bind1st_tran__iseq_int16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB_bind2nd_tran__iseq_int16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
mxnet_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2017 by Contributors * \file mxnet_op.h * \brief * \author Junyuan Xie */ #ifndef MXNET_OPERATOR_MXNET_OP_H_ #define MXNET_OPERATOR_MXNET_OP_H_ #include <dmlc/omp.h> #include <mxnet/base.h> #include <mxnet/engine.h> #include <mxnet/op_attr_types.h> #include <algorithm> #include "./operator_tune.h" #include "../engine/openmp.h" #ifdef __CUDACC__ #include "../common/cuda_utils.h" #endif // __CUDACC__ namespace mxnet { namespace op { namespace mxnet_op { using namespace mshadow; #ifdef __CUDA_ARCH__ __constant__ const float PI = 3.14159265358979323846; #else const float PI = 3.14159265358979323846; using std::isnan; #endif template<typename xpu> int get_num_threads(const int N); #ifdef __CUDACC__ #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) inline cudaDeviceProp cuda_get_device_prop() { int device; CUDA_CALL(cudaGetDevice(&device)); cudaDeviceProp deviceProp; CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device)); return deviceProp; } /*! 
* \brief Get the number of blocks for cuda kernel given N */ inline int cuda_get_num_blocks(const int N) { using namespace mshadow::cuda; return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); } template<> inline int get_num_threads<gpu>(const int N) { using namespace mshadow::cuda; return kBaseThreadNum * cuda_get_num_blocks(N); } #endif // __CUDACC__ template<> inline int get_num_threads<cpu>(const int N) { return engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); } /*! \brief operator request type switch */ #define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } /*! \brief operator request type switch */ #define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ { \ const OpReqType ReqType = kNullOp; \ {__VA_ARGS__} \ } \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } #define MXNET_NDIM_SWITCH(NDim, ndim, ...) \ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ {__VA_ARGS__} \ } else if (NDim == 2) { \ const int ndim = 2; \ {__VA_ARGS__} \ } else if (NDim == 3) { \ const int ndim = 3; \ {__VA_ARGS__} \ } else if (NDim == 4) { \ const int ndim = 4; \ {__VA_ARGS__} \ } else if (NDim == 5) { \ const int ndim = 5; \ {__VA_ARGS__} \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } #define MXNET_NDIM_SWITCH_EX(NDim, ndim, ...) 
\ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ {__VA_ARGS__} \ } else if (NDim == 2) { \ const int ndim = 2; \ {__VA_ARGS__} \ } else if (NDim == 3) { \ const int ndim = 3; \ {__VA_ARGS__} \ } else if (NDim == 4) { \ const int ndim = 4; \ {__VA_ARGS__} \ } else if (NDim == 5) { \ const int ndim = 5; \ {__VA_ARGS__} \ } else if (NDim == 6) { \ const int ndim = 6; \ {__VA_ARGS__} \ } else if (NDim == 7) { \ const int ndim = 7; \ {__VA_ARGS__} \ } else if (NDim == 8) { \ const int ndim = 8; \ {__VA_ARGS__} \ } else if (NDim == 9) { \ const int ndim = 9; \ {__VA_ARGS__} \ } else if (NDim == 10) { \ const int ndim = 10; \ {__VA_ARGS__} \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } #define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) 
\
  switch (type) {                                  \
  case mshadow::kFloat32:                          \
    {                                              \
      typedef float DType;                         \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  case mshadow::kFloat64:                          \
    {                                              \
      typedef double DType;                        \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  case mshadow::kFloat16:                          \
    LOG(FATAL) << "This operation does not "       \
                  "support float16";               \
    break;                                         \
  case mshadow::kUint8:                            \
    {                                              \
      typedef uint8_t DType;                       \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  case mshadow::kInt8:                             \
    {                                              \
      typedef int8_t DType;                        \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  case mshadow::kInt32:                            \
    {                                              \
      typedef int32_t DType;                       \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  case mshadow::kInt64:                            \
    {                                              \
      typedef int64_t DType;                       \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  default:                                         \
    LOG(FATAL) << "Unknown type enum " << type;    \
  }

// Maps a storage type to the type used to accumulate values of that type.
// Default: a type accumulates in itself; half_t is specialized below to
// accumulate in float (half has too little precision for reductions).
template <typename T>
struct AccType {
  using type = T;
};

template <>
struct AccType<mshadow::half::half_t> {
  using type = float;
};

// Dispatch on a runtime dtype enum, binding DType (storage type) and AType
// (accumulation type) for the statement list in __VA_ARGS__.
// "REAL" variant: only floating-point dtypes are allowed; all integer/bool
// cases are compiled (so the typedefs exist) but abort at runtime.
#define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...)\
  switch (type) {                                  \
  case mshadow::kFloat32:                          \
    {                                              \
      typedef float DType;                         \
      typedef double AType;                        \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  case mshadow::kFloat64:                          \
    {                                              \
      typedef double DType;                        \
      typedef double AType;                        \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  case mshadow::kFloat16:                          \
    {                                              \
      typedef mshadow::half::half_t DType;         \
      typedef float AType;                         \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  case mshadow::kUint8:                            \
    {                                              \
      typedef uint8_t DType;                       \
      typedef uint8_t AType;                       \
      LOG(FATAL) << "This operation only support " \
                    "floating point types not uint8"; \
    }                                              \
    break;                                         \
  case mshadow::kInt8:                             \
    {                                              \
      typedef int8_t DType;                        \
      typedef int8_t AType;                        \
      LOG(FATAL) << "This operation only support " \
                    "floating point types not int8"; \
    }                                              \
    break;                                         \
  case mshadow::kInt32:                            \
    {                                              \
      typedef int32_t DType;                       \
      typedef int32_t AType;                       \
      LOG(FATAL) << "This operation only support " \
                    "floating point types, not int32"; \
    }                                              \
    break;                                         \
  case mshadow::kInt64:                            \
    {                                              \
      typedef int64_t DType;                       \
      typedef int64_t AType;                       \
      LOG(FATAL) << "This operation only support " \
                    "floating point types, not int64"; \
    }                                              \
    break;                                         \
  case mshadow::kBool:                             \
    {                                              \
      typedef bool DType;                          \
      typedef int64_t AType;                       \
      LOG(FATAL) << "This operation only support " \
                    "floating point types, not bool"; \
    }                                              \
    break;                                         \
  default:                                         \
    LOG(FATAL) << "Unknown type enum " << type;    \
  }

// Same dispatch, but every dtype (including integers and bool) is allowed;
// integer types accumulate in a wider integer type to reduce overflow risk.
#define MXNET_ACC_TYPE_SWITCH(type, DType, AType, ...)\
  switch (type) {                                  \
  case mshadow::kFloat32:                          \
    {                                              \
      typedef float DType;                         \
      typedef double AType;                        \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  case mshadow::kFloat64:                          \
    {                                              \
      typedef double DType;                        \
      typedef double AType;                        \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  case mshadow::kFloat16:                          \
    {                                              \
      typedef mshadow::half::half_t DType;         \
      typedef float AType;                         \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  case mshadow::kUint8:                            \
    {                                              \
      typedef uint8_t DType;                       \
      typedef uint32_t AType;                      \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  case mshadow::kInt8:                             \
    {                                              \
      typedef int8_t DType;                        \
      typedef int32_t AType;                       \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  case mshadow::kInt32:                            \
    {                                              \
      typedef int32_t DType;                       \
      typedef int64_t AType;                       \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  case mshadow::kInt64:                            \
    {                                              \
      typedef int64_t DType;                       \
      typedef int64_t AType;                       \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  case mshadow::kBool:                             \
    {                                              \
      typedef bool DType;                          \
      typedef int64_t AType;                       \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  default:                                         \
    LOG(FATAL) << "Unknown type enum " << type;    \
  }

// Integer-only dispatch: floating-point dtypes abort at runtime.
#define MXNET_INT_TYPE_SWITCH(type, DType, ...)\
  switch (type) {                                  \
  case mshadow::kFloat32:                          \
    {                                              \
      typedef float DType;                         \
      LOG(FATAL) << "This operation only support " \
                    "integer types, not float32";  \
    }                                              \
    break;                                         \
  case mshadow::kFloat64:                          \
    {                                              \
      typedef double DType;                        \
      LOG(FATAL) << "This operation only support " \
                    "integer types, not float64";  \
    }                                              \
    break;                                         \
  case mshadow::kFloat16:                          \
    {                                              \
      typedef mshadow::half::half_t DType;         \
      LOG(FATAL) << "This operation only support " \
                    "integer types, not float16";  \
    }                                              \
    break;                                         \
  case mshadow::kUint8:                            \
    {                                              \
      typedef uint8_t DType;                       \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  case mshadow::kInt8:                             \
    {                                              \
      typedef int8_t DType;                        \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  case mshadow::kInt32:                            \
    {                                              \
      typedef int32_t DType;                       \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  case mshadow::kInt64:                            \
    {                                              \
      typedef int64_t DType;                       \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  default:                                         \
    LOG(FATAL) << "Unknown type enum " << type;    \
  }

// Dispatch limited to the dtypes this load path handles
// (float32/float64/float16/uint8); anything else is a fatal error.
#define MXNET_LOAD_TYPE_SWITCH(type, DType, ...)   \
  switch (type) {                                  \
  case mshadow::kFloat32:                          \
    {                                              \
      typedef float DType;                         \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  case mshadow::kFloat64:                          \
    {                                              \
      typedef double DType;                        \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  case mshadow::kFloat16:                          \
    {                                              \
      typedef mshadow::half::half_t DType;         \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  case mshadow::kUint8:                            \
    {                                              \
      typedef uint8_t DType;                       \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  default:                                         \
    LOG(FATAL) << "Invalid loading enum type " << type; \
  }

/*!
 * \brief assign the val to out according
 *         to request in Kernel::Launch
 * \param out the data to be assigned
 * \param req the assignment request
 * \param val the value to be assigned to out
 * \tparam OType output type
 * \tparam VType value type
 */
// kNullOp skips the write entirely; kWriteTo/kWriteInplace overwrite;
// kAddTo accumulates. Unknown req values are silently ignored.
#define KERNEL_ASSIGN(out, req, val)  \
  {                                   \
    switch (req) {                    \
      case kNullOp:                   \
        break;                        \
      case kWriteTo:                  \
      case kWriteInplace:             \
        (out) = (val);                \
        break;                        \
      case kAddTo:                    \
        (out) += (val);               \
        break;                        \
      default:                        \
        break;                        \
    }                                 \
  }

// Registration helper: appends every supported dtype to a parameter's enum.
#define MXNET_ADD_ALL_TYPES \
  .add_enum("float32", mshadow::kFloat32) \
  .add_enum("float64", mshadow::kFloat64) \
  .add_enum("float16", mshadow::kFloat16) \
  .add_enum("uint8", mshadow::kUint8) \
  .add_enum("int8", mshadow::kInt8) \
  .add_enum("int32", mshadow::kInt32) \
  .add_enum("int64", mshadow::kInt64)

// Same as above, plus bool.
#define MXNET_ADD_ALL_TYPES_WITH_BOOL \
  .add_enum("float32", mshadow::kFloat32) \
  .add_enum("float64", mshadow::kFloat64) \
  .add_enum("float16", mshadow::kFloat16) \
  .add_enum("uint8", mshadow::kUint8) \
  .add_enum("int8", mshadow::kInt8) \
  .add_enum("int32", mshadow::kInt32) \
  .add_enum("int64", mshadow::kInt64) \
  .add_enum("bool", mshadow::kBool)

/* \brief Compute flattened index given coordinates and shape.
*/
// Row-major linearization of a coordinate. The (shape[i] > coord[i]) factor
// zeroes the contribution of any axis where coord is out of range — this
// presumably implements broadcasting over size-1 axes (TODO confirm against
// callers).
template<int ndim>
MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  index_t ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i];
  }
  return ret;
}

/* Compute coordinates from flattened index given shape */
// Inverse of ravel: peels digits off idx from the innermost axis outward.
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) {
  Shape<ndim> ret;
  #pragma unroll
  for (index_t i = ndim-1, j = idx; i >=0; --i) {
    auto tmp = j / shape[i];
    ret[i] = j - tmp*shape[i];
    j = tmp;
  }
  return ret;
}

/* Compute dot product of two vector */
// Typically coord . stride, giving a linear offset.
template<int ndim>
MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  index_t ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    ret += coord[i] * stride[i];
  }
  return ret;
}

/* Combining unravel and dot */
// Equivalent to dot(unravel(idx, shape), stride) without materializing the
// intermediate coordinate.
template<int ndim>
MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape,
  const Shape<ndim>& stride) {
  index_t ret = 0;
  #pragma unroll
  for (index_t i = ndim-1, j = idx; i >=0; --i) {
    auto tmp = j / shape[i];
    ret += (j - tmp*shape[i])*stride[i];
    j = tmp;
  }
  return ret;
}

/* Calculate stride of each dim from shape */
// Row-major strides; axes of extent <= 1 get stride 0 so that indexing
// broadcasts over them.
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t cumprod = 1;
  #pragma unroll
  for (int i = ndim - 1; i >= 0; --i) {
    stride[i] = (shape[i] > 1) ? cumprod : 0;
    cumprod *= shape[i];
  }
  return stride;
}

/* Increment coordinates and modify index */
// Odometer-style advance: bump the innermost coordinate and propagate carries
// outward, keeping *idx consistent with the strided offset of *coord.
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx, const Shape<ndim>& stride) {
  ++(*coord)[ndim-1];
  *idx += stride[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx = *idx + stride[i-1] - shape[i] * stride[i];
  }
}

/* Increment coordinates and modify index */
// Same odometer advance, but tracks two linear indices (e.g. two tensors with
// different strides) in lock-step.
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx1, const Shape<ndim>& stride1,
                         index_t* idx2, const Shape<ndim>& stride2) {
  ++(*coord)[ndim-1];
  *idx1 += stride1[ndim-1];
  *idx2 += stride2[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
    *idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
  }
}

/*!
 * \brief Simple copy data from one blob to another
 * \param to Destination blob
 * \param from Source blob
 */
// Sizes and device masks must match; when dtypes differ the source is cast
// element-wise to the destination type.
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
  CHECK_EQ(from.Size(), to.Size());
  CHECK_EQ(from.dev_mask(), to.dev_mask());
  MSHADOW_TYPE_SWITCH(to.type_flag_, DType, {
    if (to.type_flag_ == from.type_flag_) {
      mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s);
    } else {
      MSHADOW_TYPE_SWITCH_WITH_BOOL(from.type_flag_, SrcDType, {
        to.FlatTo1D<xpu, DType>(s) =
          mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
      })
    }
  })
}

/*! \brief Binary op backward gradient OP wrapper */
// Chain rule helper: input_grad = output_grad * GRAD_OP(args...).
template<typename GRAD_OP>
struct backward_grad {
  /* \brief Backward calc with grad
   * \param a - output grad
   * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies)
   * \return input grad
   */
  template<typename DType, typename ...Args>
  MSHADOW_XINLINE static DType Map(DType a, Args...
args) {
    return DType(a * GRAD_OP::Map(args...));
  }
};

/*! \brief Binary op backward gradient OP wrapper (tuned) */
// Same as backward_grad, additionally tagged `tunable` so the OMP-tuned
// Kernel launch path is selected for it.
template<typename GRAD_OP>
struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable {
  using backward_grad<GRAD_OP>::Map;
};

/*! \brief Select assignment operation based upon the req value
 * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch */
// Wraps an element-wise OP so that its result is written via KERNEL_ASSIGN,
// honoring the write/add/null request `req`. One Map overload per arity.
template<typename OP, int req>
struct op_with_req {
  typedef OP Operation;

  /*! \brief input is one tensor */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }

  /*! \brief inputs are two tensors */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is tensor and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }

  /*! \brief input is tensor and two scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in,
                                  const DType value_1, const DType value_2) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2));
  }

  /*! \brief No inputs (ie fill to constant value) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    KERNEL_ASSIGN(out[i], req, OP::Map());
  }

  /*! \brief input is single scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(value));
  }

  /*! \brief inputs are two tensors and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out,
                                  const DType *input_1, const DType *input_2,
                                  const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
  }

  /*!
   * \brief inputs are three tensors (ie backward grad with binary grad function)
   */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out,
                                  const DType *input_1, const DType *input_2,
                                  const DType *input_3) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
  }

  /*! \brief input is one tensor with a boolean output tensor
   * (enable_if excludes DType == bool so this never shadows the generic
   * one-tensor overload) */
  template<typename DType,
           typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }

  /*! \brief inputs are two tensors with a boolean output tensor */
  template<typename DType,
           typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is tensor and a scalar value with a boolean output tensor */
  template<typename DType,
           typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }
};

// Primary template; specialized below for cpu (and for gpu under __CUDACC__).
template<typename OP, typename xpu>
struct Kernel;

/*!
 * \brief CPU Kernel launcher
 * \tparam OP Operator to launch
 */
template<typename OP>
struct Kernel<OP, cpu> {
  /*!
   * \brief Launch a generic CPU kernel.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename ...Args>
  inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args...
args) {
#ifdef _OPENMP
    // Serial fallback when the engine recommends fewer than 2 threads;
    // otherwise a static OMP parallel-for over [0, N).
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      for (size_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (size_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }

  /*!
   * \brief Launch a generic CPU kernel with dynamic schedule. This is recommended
   * for irregular workloads such as spmv.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename ...Args>
  inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false);
    if (omp_threads < 2) {
      for (int64_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      // schedule(dynamic) lets threads steal iterations of uneven cost.
      #pragma omp parallel for num_threads(omp_threads) schedule(dynamic)
      for (int64_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (int64_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }

  /*!
   * \brief Launch CPU kernel which has OMP tuning data available.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam PRIMITIVE_OP The primitive operation to use for tuning
   * \tparam DType Data type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param dest Destination pointer (used to infer DType)
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename PRIMITIVE_OP, typename DType, typename ...Args>
  static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
    // tuned_op<>::UseOMP decides, from measured tuning data, whether the
    // parallel launch is worth the overhead for this N and thread count.
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP(
      N, static_cast<size_t>(omp_threads))) {
      for (size_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (size_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
  }

  /*!
   * \brief Launch custom-tuned kernel where each thread is set to
   * operate on a contiguous partition
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions
   */
  // Note: here OP::Map receives (start, count, args...) — a range per thread —
  // rather than a single index.
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      OP::Map(0, N, args...);
    } else {
      const auto length = (N + omp_threads - 1) / omp_threads;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); i += length) {
        // Clamp the final partition so it does not run past N.
        OP::Map(i, i + length > N ? N - i : length, args...);
      }
    }
#else
    OP::Map(0, N, args...);
#endif
  }

  /*!
   * \brief Launch a tunable OP with implicitly-supplied data type
   * \tparam DType Data type
   * \tparam T OP type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
    LaunchTuned<T, DType>(s, N, dest, args...);
    return true;
  }

  /*!
 * \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req)
 * \tparam DType Data type
 * \tparam T Wrapper type
 * \tparam Args Varargs type to eventually pass to the OP::Map() function
 * \param s Stream (usually null for CPU)
 * \param N Number of iterations
 * \param args Varargs to eventually pass to the OP::Map() function
 * \return Always true
 */
  // Selected when the wrapper's inner T::Operation (not T itself) is tunable.
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
    LaunchTuned<typename T::Operation, DType>(s, N, dest, args...);
    return true;
  }
};

#ifdef __CUDACC__
// Grid-stride loop: each thread processes indices i, i + gridDim*blockDim, ...
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x;
       i < N;
       i += blockDim.x * gridDim.x) {
    OP::Map(i, args...);
  }
}

// Variant for the LaunchEx (start, count) signature; count is fixed at 1.
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x;
       i < N;
       i += blockDim.x * gridDim.x) {
    OP::Map(i, 1, args...);
  }
}

template<typename OP>
struct Kernel<OP, gpu> {
  /*! \brief Launch GPU kernel */
  template<typename ...Args>
  inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
    if (0 == N) return;
    using namespace mshadow::cuda;
    // Enough blocks to cover N at kBaseThreadNum threads/block, capped at
    // kMaxGridNum; the grid-stride loop covers any remainder.
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel);
  }

  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) {
    if (0 == N) return;
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel_ex<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex);
  }
};
#endif  // __CUDACC__

/*!
 * \brief Set to immediate scalar value kernel
 * \tparam val Scalar immediate
 */
template<int val>
struct set_to_int : public tunable {
  // mxnet_op version (when used directly with Kernel<>::Launch()) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    out[i] = DType(val);
  }

  // mshadow_op version (when used with op_with_req<>)
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};

/*!
 * \brief Special-case kernel shortcut for setting to zero and one
 */
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;
}  // namespace mxnet_op
}  // namespace op
}  // namespace mxnet
#endif  // MXNET_OPERATOR_MXNET_OP_H_
GB_unop__creal_fp32_fc32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): any change should be made in the code generator, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop_apply__creal_fp32_fc32
// op(A') function: GB_unop_tran__creal_fp32_fc32

// C type: float
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = (aij)
// unaryop: cij = crealf (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = crealf (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC32_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = (aij) ;          \
    Cx [pC] = crealf (z) ;          \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_CREAL || GxB_NO_FP32 || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise: Cx [p] = crealf (Ax [p]) over all anz entries, parallelized
// with a static OpenMP schedule across nthreads.
GrB_Info GB_unop_apply__creal_fp32_fc32
(
    float *Cx,                  // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC32_t aij = Ax [p] ;
        GxB_FC32_t z = (aij) ;
        Cx [p] = crealf (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Body is supplied by the shared template GB_unop_transpose.c, specialized by
// the GB_* macros defined above.
GrB_Info GB_unop_tran__creal_fp32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
TempArray.h
#ifndef _TEMPARRAY_H
#define _TEMPARRAY_H

#include <iostream>
#include <string>
#include <cmath>
#include <cstdlib>
#include <vector>
#include "Border.h"

#define MAX_NBR_AXIS 3
// Allocations larger than this may go through the Memory.h allocator
// (only when _USEMEM is enabled; it is forced off just below).
#define TA_MIN_SIZE_FOR_MEM_ALLOC_CALL 50000
#undef _USEMEM
#define _USEMEM 0
#ifdef _USEMEM
#include "Memory.h"
#endif
#define CHECK_DIM 0

using std::string;

//******************************************************************************
// Template pratial specialization
//*****************************************************************************/

// Tag types distinguishing the "new array" layout from the legacy 2-D image
// layout (selects axis order in init(); see ARRAY_TYPE below).
class NewArray {
public:
  bool operator() () {return true;}
};
class Old2dArray {
public:
  bool operator() () {return false;}
};

typedef unsigned char byte;

// Convenience aliases: to_array<T,true> is the array flavor,
// to_array<T,false> the image flavor.
#define fltarray to_array<float,true>
#define dblarray to_array<double,true>
#define intarray to_array<int,true>
#define bytearray to_array<byte,true>
#define cfarray to_array<complex_f,true>
#define cdarray to_array<complex_d,true>
#define Ifloat to_array<float,false>
#define Idouble to_array<double,false>
#define Iint to_array<int,false>
#define Icomplex_f to_array<complex_f,false>
#define Icomplex_d to_array<complex_d,false>

using namespace std;

//******************************************************************************
// Template Array class
//*****************************************************************************/

// Owning, up-to-3-axis numeric array with configurable border handling.
// ARRAY_TYPE selects array (true) vs. image (false) axis conventions.
template <class PARAM_TYPE, bool ARRAY_TYPE>
class to_array {
public:
  typedef int (*TestIndexFunction)(int, int);
private:
  PARAM_TYPE* po_Buffer;                // vector of i_NbElem elt
  int i_NbElem;                         // number of elt
  int i_NbAxis;                         // number of axis
  int pto_TabNaxis[MAX_NBR_AXIS];       // number of point per axis
  //char tc_NameArray[SIZE_NAME];
  string o_NameArray;
  bool e_UseClassMemAlloc;              // buffer came from the Memory.h allocator
  bool e_GetBuffer;                     // buffer is externally owned (not freed here)
  bool is_ima;
  TestIndexFunction test_index_function; // out-of-range index policy (set by setBorder)
  type_border Border;                   // Border management type
public:
  PARAM_TYPE JunkVar;
  void set2ima() {if (i_NbAxis == 2) is_ima=true;}
  void set2tab() {if (i_NbAxis == 2) is_ima=false;}
  to_array();
  to_array (int pi_Nx, const char *Name);
  to_array (int pi_Nx, int pi_Ny, const char *Name);
  to_array (int pi_Nx, int pi_Ny, int pi_Nz, const char *Name);
  to_array (int pi_Nx, int pi_Ny=0, int pi_Nz=0);
  ~to_array ();
  void free();
  PARAM_TYPE* buffer();
  PARAM_TYPE* const buffer() const;
  void init (const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_Mat);
  void init ();
  void init (PARAM_TYPE Val);
  void alloc (int pi_Nx, const char* Name=0);
  void alloc (int pi_Nx, int pi_Ny, const char* Name=0);
  void alloc (int pi_Nx, int pi_Ny, int pi_Nz, const char* Name=0);
  void alloc (PARAM_TYPE *BuffData, int Nbr_Line, int Nbr_Col,
              const char *Name=0, bool MemManag=false);
  void alloc (PARAM_TYPE *BuffData, int Nx, int Ny, int Nz,
              const char *Name=0, bool MemManag=false);
  void reform (const int pi_Nx, const int pi_Ny=0, const int pi_Nz=0);
  void resize (const int pi_Nx, const int pi_Ny=0, const int pi_Nz=0);
  // Element access; the type_border overloads return by value and apply the
  // border policy to out-of-range coordinates.
  inline PARAM_TYPE& operator() (int x) const;
  inline PARAM_TYPE operator() (int x, type_border bord) const;
  inline PARAM_TYPE& operator() (int x, int y) const;
  inline PARAM_TYPE operator() (int x, int y, type_border bord) const;
  inline PARAM_TYPE& operator() (int x, int y, int z) const;
  inline PARAM_TYPE operator() (int x, int y, int z, type_border bord) const;
  inline PARAM_TYPE& setx (int x, type_border bord) ;
  inline PARAM_TYPE& setxy (int x, int y, type_border bord) ;
  inline PARAM_TYPE& setxyz (int x, int y, int z, type_border bord) ;
/*
  inline PARAM_TYPE& operator[] (int x) const;
  inline PARAM_TYPE& operator[] (int x, type_border bord) const;
  inline PARAM_TYPE& operator[] (int x, int y) const;
  inline PARAM_TYPE& operator[] (int x, int y, type_border bord) const;
  inline PARAM_TYPE& operator[] (int x, int y, int z) const;
  inline PARAM_TYPE & operator[] (int x, int y, int z, type_border bord) const;
*/
  const to_array<PARAM_TYPE,ARRAY_TYPE>& operator = (const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_Mat);
  const to_array<PARAM_TYPE,ARRAY_TYPE>& operator += (const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_Mat);
  const to_array<PARAM_TYPE,ARRAY_TYPE>& operator *= (const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_Mat);
  const to_array<PARAM_TYPE,ARRAY_TYPE>& operator *= (const double coef);
  const to_array<PARAM_TYPE,ARRAY_TYPE>& operator -= (const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_Mat);
  const to_array<PARAM_TYPE,ARRAY_TYPE>& operator /= (const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_Mat);
  const to_array<PARAM_TYPE,ARRAY_TYPE>& operator ^ (const double pf_coef);
  void info(string Name="");
  void display (int pi_NbElem=0);
  void rampgen ();
  void sup_threshold (float ThresholLevel);
  void inf_threshold (float ThresholLevel);
  int n_elem() const {return i_NbElem;}
  int naxis() const { return i_NbAxis;}
  int axis(int pi_NumAxis) const { return pto_TabNaxis[pi_NumAxis-1];}
  int nx() const { return pto_TabNaxis[0];}
  int ny() const { return pto_TabNaxis[1];}
  int nz() const { return pto_TabNaxis[2];}
  //string get_name () const {return o_NameArray;}
  bool get_isbuffer() const { return e_GetBuffer;}
  void set_isbuffer(bool b) { e_GetBuffer = b;}
  bool get_buf() const { return e_GetBuffer;}
  bool get_memalloc() const { return e_UseClassMemAlloc;}
  // Image-style aliases: nc (columns) == nx, nl (lines) == ny.
  int nc() const { return pto_TabNaxis[0];}
  int nl() const { return pto_TabNaxis[1];}
  PARAM_TYPE min ();
  PARAM_TYPE max ();
  PARAM_TYPE maxfabs ();
  PARAM_TYPE min (int& pri_ind);
  PARAM_TYPE max (int& pri_ind);
  PARAM_TYPE maxfabs (int& pri_ind);
  double total () const;
  double energy () const;
  double sigma () const;
  double mean () const;
  void sigma_clip (float& pf_Mean, float& pf_Sigma, int pi_Nit=3) const;
  float sigma_clip (int pi_Nit=3) const;
  to_array<PARAM_TYPE,ARRAY_TYPE> (const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_Obj);
  // Selects the out-of-range index handler matching the requested border mode.
  inline void setBorder( type_border border ) {
    Border = border;
    switch (border) {
      case I_CONT:
        test_index_function = test_index_cont;
        break;
      case I_MIRROR:
        test_index_function = test_index_mirror;
        break;
      case I_PERIOD:
        test_index_function = test_index_period;
        break;
      case I_ZERO:
      default:
        test_index_function = test_index_zero;
        break;
    } // end case
  }
private:
  void set_attrib();
};

//------------------------------------------------------------------------------
// to_array ()
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
to_array<PARAM_TYPE,ARRAY_TYPE>::to_array () {
  set_attrib();
}

//------------------------------------------------------------------------------
// to_array (int pi_Nx, const char* Name)
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
to_array<PARAM_TYPE,ARRAY_TYPE>::to_array (int pi_Nx, const char* Name) {
  set_attrib();
  alloc(pi_Nx, 0, 0, Name);
}

//------------------------------------------------------------------------------
// to_array (int pi_Nx, int pi_Ny, const char* Name)
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
to_array<PARAM_TYPE,ARRAY_TYPE>::to_array (int pi_Nx, int pi_Ny, const char* Name) {
  set_attrib();
  alloc(pi_Nx, pi_Ny, 0, Name);
}

//------------------------------------------------------------------------------
// to_array (int pi_Nx, int pi_Ny, int pi_Nz, const char* Name)
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
to_array<PARAM_TYPE,ARRAY_TYPE>::to_array (int pi_Nx, int pi_Ny, int pi_Nz, const char* Name) {
  set_attrib();
  alloc(pi_Nx, pi_Ny, pi_Nz, Name);
}

//------------------------------------------------------------------------------
// to_array (int pi_Nx, int pi_Ny, int pi_Nz)
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
to_array<PARAM_TYPE,ARRAY_TYPE>::to_array (int pi_Nx, int pi_Ny, int pi_Nz) {
  set_attrib();
  alloc(pi_Nx, pi_Ny, pi_Nz);
}

//------------------------------------------------------------------------------
// ~to_array ()
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
to_array<PARAM_TYPE,ARRAY_TYPE>::~to_array () {free();}

//------------------------------------------------------------------------------
// free ()
//------------------------------------------------------------------------------
// Releases the buffer (unless it is externally owned) and resets all metadata.
template <class PARAM_TYPE, bool ARRAY_TYPE>
void to_array<PARAM_TYPE,ARRAY_TYPE>::free() {
  if (e_UseClassMemAlloc == true){
#ifdef _USEMEM
    MemMg_free (po_Buffer);
#endif
  } else {
    // Only delete memory this object allocated itself.
    if (i_NbElem != 0 && e_GetBuffer == false) delete[] po_Buffer;
  }
  i_NbElem=0;i_NbAxis=0;o_NameArray="";//tc_NameArray[0]='\0';
  e_UseClassMemAlloc=false;
  for (int i=0;i<MAX_NBR_AXIS;i++) pto_TabNaxis[i]=0;
}

//------------------------------------------------------------------------------
// buffer ()
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
PARAM_TYPE* to_array<PARAM_TYPE,ARRAY_TYPE>::buffer() {
  return po_Buffer;
}

//------------------------------------------------------------------------------
// buffer ()
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
PARAM_TYPE* const to_array<PARAM_TYPE,ARRAY_TYPE>::buffer() const {
  return po_Buffer;
}

//------------------------------------------------------------------------------
// init ()
//------------------------------------------------------------------------------
// Reallocates this array to the shape of pro_Mat; the image flavor
// (ARRAY_TYPE == false) swaps the x/y axes. Note: contents are NOT copied.
template <class PARAM_TYPE, bool ARRAY_TYPE>
void to_array<PARAM_TYPE,ARRAY_TYPE>::init (const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_Mat) {
  if (n_elem() != 0) free();
  if (ARRAY_TYPE==true) {
    alloc(pro_Mat.nx(), pro_Mat.ny(), pro_Mat.nz());
  } else {
    alloc(pro_Mat.ny(), pro_Mat.nx(), pro_Mat.nz());
  }
}

//------------------------------------------------------------------------------
// init (PARAM_TYPE Val=0)
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
// !!!!! convert function from int to PARAM_TYPE must exist
// Fills every element with Val.
void to_array<PARAM_TYPE,ARRAY_TYPE>::init (PARAM_TYPE Val)
{
   for (int i=0;i<n_elem();i++) po_Buffer[i] = Val;
}
template <class PARAM_TYPE, bool ARRAY_TYPE>
// !!!!! convert function from int to PARAM_TYPE must exist
// Zero-fills every element.
void to_array<PARAM_TYPE,ARRAY_TYPE>::init ()
{
   PARAM_TYPE Val=0;
   for (int i=0;i<n_elem();i++) po_Buffer[i] = Val;
}
//------------------------------------------------------------------------------
// alloc (int pi_Nx, const char* Name)
// 1D allocation, delegates to the 3D overload.
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
void to_array<PARAM_TYPE,ARRAY_TYPE>::alloc (int pi_Nx, const char* Name)
{
   alloc (pi_Nx, 0, 0, Name);
}
//------------------------------------------------------------------------------
// alloc (int pi_Nx, int pi_Ny, const char* Name)
// 2D allocation, delegates to the 3D overload.
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
void to_array<PARAM_TYPE,ARRAY_TYPE>::alloc (int pi_Nx, int pi_Ny, const char* Name)
{
   alloc (pi_Nx, pi_Ny, 0, Name);
}
//------------------------------------------------------------------------------
// alloc (int pi_Nx, int pi_Ny, int pi_Nz, const char* Name)
// Main allocator: frees any previous buffer, computes the element count from
// the highest non-zero dimension, allocates (memory manager or new[]),
// records axis sizes (swapped for the image flavour) and zero-fills.
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
void to_array<PARAM_TYPE,ARRAY_TYPE>::alloc (int pi_Nx, int pi_Ny, int pi_Nz, const char* Name)
{
   if (i_NbElem != 0) free();
   // Element count: trailing zero dimensions mean fewer axes.
   if (pi_Nz != 0) i_NbElem = pi_Nz*pi_Ny*pi_Nx;
   else if (pi_Ny != 0) i_NbElem = pi_Ny*pi_Nx;
   else i_NbElem = pi_Nx;
   // Large buffers may go through the class memory manager when compiled in.
   if (i_NbElem > TA_MIN_SIZE_FOR_MEM_ALLOC_CALL)
   {
#ifdef _USEMEM
      PARAM_TYPE Dummy=0;
      po_Buffer = MemMg_alloc (i_NbElem,Dummy);
      e_UseClassMemAlloc = true;
#else
      e_UseClassMemAlloc = false;
      po_Buffer = new PARAM_TYPE [i_NbElem];
      if (po_Buffer == 0) cout << " Not enought memory " << endl;
#endif
   }
   else if (i_NbElem != 0)
   {
      e_UseClassMemAlloc = false;
      po_Buffer = new PARAM_TYPE [i_NbElem];
      if (po_Buffer == 0) cout << " Not enought memory " << endl;
   }
   else
   {
      // Empty allocation: no buffer at all.
      e_UseClassMemAlloc = false;
      po_Buffer = (PARAM_TYPE*)NULL;
      e_GetBuffer=false;
   }
   e_GetBuffer = false;
   // Axis bookkeeping; for the image flavour (ARRAY_TYPE == false) the x/y
   // axis slots are swapped.
   pto_TabNaxis[2] = (pi_Nz != 0) ? pi_Nz : 0;
   if (ARRAY_TYPE==true)
   {
      pto_TabNaxis[1] = (pi_Ny != 0) ? pi_Ny : 0;
      pto_TabNaxis[0] = (pi_Nx != 0) ? pi_Nx : 0;
      is_ima = false;
   }
   else
   {
      pto_TabNaxis[0] = (pi_Ny != 0) ? pi_Ny : 0;
      pto_TabNaxis[1] = (pi_Nx != 0) ? pi_Nx : 0;
      is_ima = true;
   }
   i_NbAxis = (pi_Nx != 0) ? 1 : 0;
   i_NbAxis = (pi_Ny != 0) ? 2 : i_NbAxis;
   i_NbAxis = (pi_Nz != 0) ? 3 : i_NbAxis;
   // NOTE(review): when i_NbElem == 0 this calls memset on a NULL pointer
   // (size 0) — technically undefined behaviour; confirm harmless here.
   memset (po_Buffer, 0, i_NbElem*sizeof(PARAM_TYPE));
   //if (Name != NULL) strcpy(tc_NameArray, Name);
   if (Name != NULL) o_NameArray = Name;
   //if (ARRAY_TYPE==true)
   //   cout << "new Ifloat : Nlignes=" << nl() << ", Ncol=" << nc() << endl;
   //else
   //   cout << "new fltarr : Nlignes=" << nl() << ", Ncol=" << nc() << endl;
}
//------------------------------------------------------------------------------
// alloc (PARAM_TYPE *BuffData, int Nbr_Line, int Nbr_Col, const char *Name, bool MemManag)
// Adopts an externally supplied 2D buffer (no copy). e_GetBuffer is set so
// free() will not delete it; MemManag flags memory-manager ownership.
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
void to_array<PARAM_TYPE,ARRAY_TYPE>::alloc (PARAM_TYPE *BuffData, int Nbr_Line, int Nbr_Col, const char *Name, bool MemManag)
{
   // Release any previously owned buffer first.
   if (i_NbElem != 0)
   {
      if (e_UseClassMemAlloc == true)
      {
#ifdef _USEMEM
         MemMg_free (po_Buffer);
#endif
      }
      else if (e_GetBuffer == false) delete [] po_Buffer;
   }
   e_GetBuffer = true;
   e_UseClassMemAlloc = MemManag;
   po_Buffer = BuffData;
   i_NbElem = Nbr_Line * Nbr_Col;
   if (ARRAY_TYPE==true)
   {
      pto_TabNaxis[1] = Nbr_Col;
      pto_TabNaxis[0] = Nbr_Line;
      is_ima = false;
   }
   else
   {
      pto_TabNaxis[0] = Nbr_Col;
      pto_TabNaxis[1] = Nbr_Line;
      is_ima = true;
   }
   i_NbAxis = 2;
}
//------------------------------------------------------------------------------
// alloc (PARAM_TYPE *BuffData, int Nx, int Ny, int Nz, const char *Name, bool MemManag)
// Adopts an externally supplied 3D buffer (no copy).
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
void to_array<PARAM_TYPE,ARRAY_TYPE>::alloc (PARAM_TYPE *BuffData, int _Nx, int _Ny, int _Nz, const char *Name, bool MemManag)
{// doesnt work well, treated as an image
   if (i_NbElem != 0)
   {
      if (e_UseClassMemAlloc == true)
      {
#ifdef _USEMEM
         MemMg_free (po_Buffer);
#endif
      }
      else if (e_GetBuffer == false) delete [] po_Buffer;
   }
   e_GetBuffer = true;
   e_UseClassMemAlloc = MemManag;
   po_Buffer = BuffData;
   i_NbElem = _Nx * _Ny * _Nz;
   pto_TabNaxis[0] = _Nx;
   pto_TabNaxis[1] = _Ny;
   pto_TabNaxis[2] = _Nz;
   i_NbAxis = 3;
   is_ima = false;
}
//------------------------------------------------------------------------------
// reform (const int pi_Nx, const int pi_Ny, const int pi_Nz)
// Changes the geometry in place. The buffer is reallocated only when the new
// element count exceeds the old one; otherwise the existing storage (and its
// current contents) is reused.
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
void to_array<PARAM_TYPE,ARRAY_TYPE>::reform (const int pi_Nx, const int pi_Ny, const int pi_Nz)
{
   if (i_NbElem == 0) alloc(pi_Nx,pi_Ny,pi_Nz,"alloc resize");
   else
   {
      int ai_Inter;
      i_NbAxis = 1;
      ai_Inter = pi_Nx;
      pto_TabNaxis[0] = 0;
      pto_TabNaxis[1] = 0;
      pto_TabNaxis[2] = 0;
      // test Array type
      if (ARRAY_TYPE==true)
      {
         pto_TabNaxis[0] = pi_Nx;
         if (pi_Ny != 0) {pto_TabNaxis[1] = pi_Ny; i_NbAxis=2; ai_Inter = pi_Nx*pi_Ny;}
         is_ima = false;
      }
      else
      {
         pto_TabNaxis[1] = pi_Nx;
         if (pi_Ny != 0) {pto_TabNaxis[0] = pi_Ny; i_NbAxis=2; ai_Inter = pi_Nx*pi_Ny;}
         is_ima = true;
      }
      if (pi_Nz != 0) {pto_TabNaxis[2] = pi_Nz; i_NbAxis=3; ai_Inter = pi_Nx*pi_Ny*pi_Nz;}
      // increase buffer size
      if (ai_Inter > i_NbElem)
      {
         // deallocate previous bufferr
         if (e_UseClassMemAlloc == true)
         {
#ifdef _USEMEM
            MemMg_free (po_Buffer);
#endif
         }
         else if (e_GetBuffer == false && i_NbElem != 0) delete [] po_Buffer;
         // allocate new buffer
         if (ai_Inter > TA_MIN_SIZE_FOR_MEM_ALLOC_CALL)
         {
#ifdef _USEMEM
            e_UseClassMemAlloc = true;
            PARAM_TYPE Dummy=0;
            po_Buffer = MemMg_alloc (ai_Inter, Dummy);
#else
            e_UseClassMemAlloc = false;
            po_Buffer = new PARAM_TYPE [ai_Inter];
            if (po_Buffer == 0) cout << "Not enought memory " << endl;
#endif
         }
         else
         {
            e_UseClassMemAlloc = false;
            po_Buffer = new PARAM_TYPE [ai_Inter];
            if (po_Buffer == 0) cout << "Not enought memory " << endl;
         }
         e_GetBuffer = false;
      }
      i_NbElem = ai_Inter;
   }
}
//------------------------------------------------------------------------------
// resize (const int pi_Nx, const int pi_Ny=0, const int pi_Nz=0)
// Alias for reform().
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
void to_array<PARAM_TYPE,ARRAY_TYPE>::resize (const int pi_Nx, const int pi_Ny, const int pi_Nz)
{
   reform (pi_Nx,pi_Ny, pi_Nz);
}
//------------------------------------------------------------------------------
// operator (int x)
// Flat (linear) element access, usable on 2D/3D arrays as well.
//------------------------------------------------------------------------------
// could be used with 2d or 3d tab, on all the element.... => no border test...
template <class PARAM_TYPE, bool ARRAY_TYPE>
inline PARAM_TYPE& to_array<PARAM_TYPE,ARRAY_TYPE>::operator() (int x) const
{
   // if (naxis() != 1) {cout << "One dim array" << endl; exit(-1);} //!!!!!!!!!
   assert (test_indice_i (tc_NameArray, x, nx()));
   return po_Buffer[x];
}
/*
template <class PARAM_TYPE, bool ARRAY_TYPE>
inline PARAM_TYPE& to_array<PARAM_TYPE,ARRAY_TYPE>::operator[] (int x) const
{
   // if (naxis() != 1) {cout << "One dim array" << endl; exit(-1);} //!!!!!!!!!
   assert (test_indice_i (tc_NameArray, x, nx()));
   return po_Buffer[x];
}
*/
//------------------------------------------------------------------------------
// operator (int x, type_border bord)
// 1D read access with border handling (clamp / mirror / periodic / zero).
//------------------------------------------------------------------------------
// !!!!! convert function from int to PARAM_TYPE must exist
template <class PARAM_TYPE, bool ARRAY_TYPE>
inline PARAM_TYPE to_array<PARAM_TYPE,ARRAY_TYPE>::operator() (int x, type_border bord) const
{
   if (naxis() != 1)
   {
      cout << "Error: naxis = " << naxis() << " and a one dim array is expected ..." << endl;
      exit(-1);
   }
   if ((x<0) || (x>=nx()))
   {
      PARAM_TYPE Val;
      int indx=x;
      switch (bord)
      {
         case I_CONT: indx = test_index_cont(x,nx());
                      Val = po_Buffer[indx]; break;
         case I_MIRROR: indx = test_index_mirror(x,nx());
                        Val = po_Buffer[indx]; break;
         case I_PERIOD: indx = test_index_period(x,nx());
                        Val = po_Buffer[indx]; break;
         case I_ZERO: Val=0;break;
                      break;
         default:exit(-1);break;
      }
      return Val;
   }
   else return po_Buffer[x];
}
// setx: writable 1D access with border handling.
// NOTE(review): for I_CONT and I_ZERO a reference to the scratch member
// JunkVar is returned, so assignments through it are silently discarded —
// confirm this is the intended contract.
template <class PARAM_TYPE, bool ARRAY_TYPE>
inline PARAM_TYPE & to_array<PARAM_TYPE,ARRAY_TYPE>::setx (int x, type_border bord)
{
   if (naxis() != 1)
   {
      cout << "Error: naxis = " << naxis() << " and a one dim array is expected ..." << endl;
      exit(-1);
   }
   if ((x<0) || (x>=nx()))
   {
      int indx=x;
      switch (bord)
      {
         case I_CONT: indx = test_index_cont(x,nx());
                      JunkVar = po_Buffer[indx];
                      return JunkVar; break;
         case I_MIRROR: indx = test_index_mirror(x,nx());
                        return po_Buffer[indx]; break;
         case I_PERIOD: indx = test_index_period(x,nx());
                        return po_Buffer[indx]; break;
         case I_ZERO: JunkVar=0; return JunkVar; break;
                      break;
         default:exit(-1);break;
      }
   }
   else return po_Buffer[x];
}
//------------------------------------------------------------------------------
// operator (int x, int y)
// 2D element access. Generic (image) layout: row x, column y.
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
inline PARAM_TYPE& to_array<PARAM_TYPE,ARRAY_TYPE>::operator() (int x, int y) const
{
#if CHECK_DIM
   if (i_NbAxis != 2)
   {
      cout << "Error: naxis = " << i_NbAxis << " and a two dim array is expected ..." << endl;
      exit(-1);
   }
#endif
   return po_Buffer[x*pto_TabNaxis[0]+y];
}
// specialization to the array case
// (array layout: x is the fast axis, so the index formula is transposed)
template <>
inline float& to_array<float,true>::operator()(int x, int y) const
{
#if CHECK_DIM
   if (i_NbAxis != 2)
   {
      cout << "Error: naxis = " << i_NbAxis << " and a two dim array is expected ..." << endl;
      exit(-1);
   }
#endif
   return po_Buffer[y*pto_TabNaxis[0]+x];
}
template <>
inline double& to_array<double,true>::operator()(int x, int y) const
{
#if CHECK_DIM
   if (i_NbAxis != 2)
   {
      cout << "Error: naxis = " << i_NbAxis << " and a two dim array is expected ..." << endl;
      exit(-1);
   }
#endif
   return po_Buffer[y*pto_TabNaxis[0]+x];
}
template <>
inline int& to_array<int,true>::operator()(int x, int y) const
{
#if CHECK_DIM
   if (i_NbAxis != 2)
   {
      cout << "Error: naxis = " << i_NbAxis << " and a two dim array is expected ..." << endl;
      exit(-1);
   }
#endif
   return po_Buffer[y*pto_TabNaxis[0]+x];
}
template <>
inline byte& to_array<byte,true>::operator()(int x, int y) const
{
#if CHECK_DIM
   if (i_NbAxis != 2)
   {
      cout << "Error: naxis = " << i_NbAxis << " and a two dim array is expected ..." << endl;
      exit(-1);
   }
#endif
   return po_Buffer[y*pto_TabNaxis[0]+x];
}
template <>
inline complex_f& to_array<complex_f,true>::operator()(int x, int y) const
{
#if CHECK_DIM
   if (i_NbAxis != 2)
   {
      cout << "Error: naxis = " << i_NbAxis << " and a two dim array is expected ..." << endl;
      exit(-1);
   }
#endif
   return po_Buffer[y*pto_TabNaxis[0]+x];
}
template <>
inline complex_d& to_array<complex_d,true>::operator()(int x, int y) const
{
#if CHECK_DIM
   if (i_NbAxis != 2)
   {
      cout << "Error: naxis = " << i_NbAxis << " and a two dim array is expected ..." << endl;
      exit(-1);
   }
#endif
   return po_Buffer[y*pto_TabNaxis[0]+x];
}
//------------------------------------------------------------------------------
// operator (int x, int y, type_border bord)
// 2D read access with border handling through the test_index_function member.
// NOTE(review): 'register' is deprecated (removed in C++17).
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
inline PARAM_TYPE to_array<PARAM_TYPE,ARRAY_TYPE>::operator() (int x, int y, type_border bord) const
{
#if CHECK_DIM
   if (i_NbAxis != 2)
   {
      cout << "Error: naxis = " << i_NbAxis << " and a two dim array is expected ... " << endl;
      exit(-1);
   }
#endif
   register int indx = (*test_index_function)(x,pto_TabNaxis[1]);
   register int indy = (*test_index_function)(y,pto_TabNaxis[0]);
   return po_Buffer[indx*pto_TabNaxis[0]+indy];
}
// specialization to the array case
template <>
inline float to_array<float,true>::operator()(int x, int y, type_border bord) const
{
#if CHECK_DIM
   if (i_NbAxis != 2)
   {
      cout << "Error: naxis = " << i_NbAxis << " and a two dim array is expected ..." << endl;
      exit(-1);
   }
#endif
   register int indx = (*test_index_function)(x,pto_TabNaxis[0]);
   register int indy = (*test_index_function)(y,pto_TabNaxis[1]);
   return po_Buffer[indy*pto_TabNaxis[0]+indx];
}
template <>
inline double to_array<double,true>::operator()(int x, int y, type_border bord) const
{
#if CHECK_DIM
   if (i_NbAxis != 2)
   {
      cout << "Error: naxis = " << i_NbAxis << " and a two dim array is expected ..." << endl;
      exit(-1);
   }
#endif
   register int indx = (*test_index_function)(x,pto_TabNaxis[0]);
   register int indy = (*test_index_function)(y,pto_TabNaxis[1]);
   return po_Buffer[indy*pto_TabNaxis[0]+indx];
}
template <>
inline int to_array<int,true>::operator()(int x, int y, type_border bord) const
{
#if CHECK_DIM
   if (i_NbAxis != 2)
   {
      cout << "Error: naxis = " << i_NbAxis << " and a two dim array is expected ..." << endl;
      exit(-1);
   }
#endif
   register int indx = (*test_index_function)(x,pto_TabNaxis[0]);
   register int indy = (*test_index_function)(y,pto_TabNaxis[1]);
   return po_Buffer[indy*pto_TabNaxis[0]+indx];
}
template <>
inline byte to_array<byte,true>::operator()(int x, int y, type_border bord) const
{
#if CHECK_DIM
   if (i_NbAxis != 2)
   {
      cout << "Error: naxis = " << i_NbAxis << " and a two dim array is expected ..." << endl;
      exit(-1);
   }
#endif
   register int indx = (*test_index_function)(x,pto_TabNaxis[0]);
   register int indy = (*test_index_function)(y,pto_TabNaxis[1]);
   return po_Buffer[indy*pto_TabNaxis[0]+indx];
}
template <>
inline complex_f to_array<complex_f,true>::operator()(int x, int y, type_border bord) const
{
#if CHECK_DIM
   if (i_NbAxis != 2)
   {
      cout << "Error: naxis = " << i_NbAxis << " and a two dim array is expected ..." << endl;
      exit(-1);
   }
#endif
   register int indx = (*test_index_function)(x,pto_TabNaxis[0]);
   register int indy = (*test_index_function)(y,pto_TabNaxis[1]);
   return po_Buffer[indy*pto_TabNaxis[0]+indx];
}
template <>
inline complex_d to_array<complex_d,true>::operator()(int x, int y, type_border bord) const
{
#if CHECK_DIM
   if (i_NbAxis != 2)
   {
      cout << "Error: naxis = " << i_NbAxis << " and a two dim array is expected ..." << endl;
      exit(-1);
   }
#endif
   register int indx = (*test_index_function)(x,pto_TabNaxis[0]);
   register int indy = (*test_index_function)(y,pto_TabNaxis[1]);
   return po_Buffer[indy*pto_TabNaxis[0]+indx];
}
// setxy: writable 2D access with border handling.
// NOTE(review): for I_CONT/I_ZERO a reference to JunkVar is returned, so
// writes through it are discarded — presumably intentional; verify.
template <class PARAM_TYPE, bool ARRAY_TYPE>
inline PARAM_TYPE & to_array<PARAM_TYPE,ARRAY_TYPE>::setxy (int x, int y, type_border bord)
{
   if (naxis() != 2)
   {
      cout << "Error: naxis = " << naxis() << " and a two dim array is expected ... " << endl;
      exit(-1);
   }
   int indx=x;
   int indy=y;
   // Effective bounds depend on the flavour (array vs image axis order).
   int Nx = (ARRAY_TYPE==true) ? nx(): ny();
   int Ny = (ARRAY_TYPE==true) ? ny(): nx();
   if ((x<0) || (x>=Nx) || (y<0) || (y>=Ny))
   {
      switch (bord)
      {
         case I_ZERO: JunkVar = 0;
                      return JunkVar;
         case I_CONT:
              if (is_ima==false)
              {
                 indx = test_index_cont(x,nx());
                 indy = test_index_cont(y,ny());
                 JunkVar = po_Buffer[indy*pto_TabNaxis[0]+indx];
              }
              else
              {
                 indx = test_index_cont(x,ny());
                 indy = test_index_cont(y,nx());
                 JunkVar = po_Buffer[indx*pto_TabNaxis[0]+indy];
              }
              return JunkVar;
              break;
         case I_MIRROR:
              if (ARRAY_TYPE==true)
              {
                 indx = test_index_mirror(x,nx());
                 indy = test_index_mirror(y,ny());
              }
              else
              {
                 indx = test_index_mirror(x,ny());
                 indy = test_index_mirror(y,nx());
              }
              break;
         case I_PERIOD:
              if (ARRAY_TYPE==true)
              {
                 indx = test_index_period(x,nx());
                 indy = test_index_period(y,ny());
              }
              else
              {
                 indx = test_index_period(x,ny());
                 indy = test_index_period(y,nx());
              }
              break;
         default: exit(-1);break;
      }
   }
   if (ARRAY_TYPE==true)
   {
      return po_Buffer[indy*pto_TabNaxis[0]+indx];
   }
   else
   {
      return po_Buffer[indx*pto_TabNaxis[0]+indy];
   }
}
//------------------------------------------------------------------------------
// operator (int x, int y, int z)
// 3D element access (x fast, then y, then z).
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
inline PARAM_TYPE& to_array<PARAM_TYPE,ARRAY_TYPE>::operator() (int x, int y, int z) const
{
#if CHECK_DIM
   if (naxis() != 3)
   {
      cout << "Error: naxis = " << naxis() << " and a three dimension array is expected ... " << endl;
      exit(-1);
   }
#endif
   return po_Buffer[z*pto_TabNaxis[0]*pto_TabNaxis[1]+y*pto_TabNaxis[0]+x];
}
//------------------------------------------------------------------------------
// operator (int x, int y, int z, type_border bord)
// 3D read access with border handling.
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
inline PARAM_TYPE to_array<PARAM_TYPE,ARRAY_TYPE>::operator() (int x, int y, int z, type_border bord) const
{
#if CHECK_DIM
   if (naxis() != 3)
   {
      cout << "Error: naxis = " << naxis() << " and a three dimension array is expected ... " << endl;
      exit(-1);
   }
#endif
   int indx=x;
   int indy=y;
   int indz=z;
   if ((x<0) || (x>=nx()) || (y<0) || (y>=ny()) || (z<0) || (z>=nz()))
   {
      PARAM_TYPE Val;
      switch (bord)
      {
         case I_CONT: indx = test_index_cont(x,nx());
                      indy = test_index_cont(y,ny());
                      indz = test_index_cont(z,nz()); break;
         case I_MIRROR: indx = test_index_mirror(x,nx());
                        indy = test_index_mirror(y,ny());
                        indz = test_index_mirror(z,nz()); break;
         case I_PERIOD: indx = test_index_period(x,nx());
                        indy = test_index_period(y,ny());
                        indz = test_index_period(z,nz()); break;
         case I_ZERO: Val=0; return Val; break;
                      break;
         default:exit(-1);break;
      }
   }
   return po_Buffer[indz*pto_TabNaxis[0]*pto_TabNaxis[1]+indy*pto_TabNaxis[0]+indx];
}
// setxyz: writable 3D access with border handling.
// NOTE(review): in the I_CONT branch the clamped indices are computed but the
// function returns the *stale* JunkVar instead of the clamped element (and
// unlike setx/setxy, JunkVar is never assigned) — this looks like a bug;
// confirm against callers before changing.
template <class PARAM_TYPE, bool ARRAY_TYPE>
inline PARAM_TYPE & to_array<PARAM_TYPE,ARRAY_TYPE>::setxyz (int x, int y, int z, type_border bord)
{
#if CHECK_DIM
   if (naxis() != 3)
   {
      cout << "Error: naxis = " << naxis() << " and a three dimension array is expected ... " << endl;
      exit(-1);
   }
#endif
   int indx=x;
   int indy=y;
   int indz=z;
   if ((x<0) || (x>=nx()) || (y<0) || (y>=ny()) || (z<0) || (z>=nz()))
   {
      switch (bord)
      {
         case I_CONT: indx = test_index_cont(x,nx());
                      indy = test_index_cont(y,ny());
                      indz = test_index_cont(z,nz());
                      return JunkVar; break;
         case I_MIRROR: indx = test_index_mirror(x,nx());
                        indy = test_index_mirror(y,ny());
                        indz = test_index_mirror(z,nz()); break;
         case I_PERIOD: indx = test_index_period(x,nx());
                        indy = test_index_period(y,ny());
                        indz = test_index_period(z,nz()); break;
         case I_ZERO: JunkVar=0; return JunkVar; break;
                      break;
         default:exit(-1);break;
      }
   }
   return po_Buffer[indz*pto_TabNaxis[0]*pto_TabNaxis[1]+indy*pto_TabNaxis[0]+indx];
}
//------------------------------------------------------------------------------
// operator =
// Deep copy: resizes (flat) to the source element count, copies all elements
// (OpenMP-parallel when available), then copies axis bookkeeping.
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
const to_array<PARAM_TYPE,ARRAY_TYPE>& to_array<PARAM_TYPE,ARRAY_TYPE>::operator = (const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_Mat)
{
   reform (pro_Mat.n_elem());
#ifdef _OPENMP
#pragma omp parallel for
   for (int i=0; i<i_NbElem; i++) po_Buffer[i] = pro_Mat.po_Buffer[i];
#else
   PARAM_TYPE* buf = &po_Buffer[0];
   PARAM_TYPE* otherBuf = &pro_Mat.po_Buffer[0];
   int N = i_NbElem;
   while (N--) *buf++ = *otherBuf++;
#endif
   i_NbAxis = pro_Mat.naxis();
   for (int j=0; j<i_NbAxis; j++) pto_TabNaxis[j] = pro_Mat.axis(j+1);
   return (*this);
}
//------------------------------------------------------------------------------
// operator +=
// Element-wise addition; assumes pro_Mat has at least i_NbElem elements.
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
const to_array<PARAM_TYPE,ARRAY_TYPE>& to_array<PARAM_TYPE,ARRAY_TYPE>::operator += (const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_Mat)
{
   for (int x=0; x<i_NbElem; x++) po_Buffer[x] += pro_Mat.po_Buffer[x];
   return (*this);
}
//------------------------------------------------------------------------------
// operator *=
// Element-wise multiplication.
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
const to_array<PARAM_TYPE,ARRAY_TYPE>& to_array<PARAM_TYPE,ARRAY_TYPE>::operator *= (const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_Mat)
{
   for (int x=0; x<i_NbElem; x++) po_Buffer[x] *= pro_Mat.po_Buffer[x];
   return (*this);
}
//------------------------------------------------------------------------------
// operator *=
// Scalar multiplication (computed in double, cast back to PARAM_TYPE).
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
const to_array<PARAM_TYPE,ARRAY_TYPE>& to_array<PARAM_TYPE,ARRAY_TYPE>::operator *= (const double coef)
{
   for (int i=0; i<i_NbElem; i++)
      po_Buffer[i] = (PARAM_TYPE) ((double)po_Buffer[i] * coef);
   return (*this);
}
//------------------------------------------------------------------------------
// operator -=
// Element-wise subtraction (OpenMP-parallel when available).
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
const to_array<PARAM_TYPE,ARRAY_TYPE>& to_array<PARAM_TYPE,ARRAY_TYPE>::operator -= (const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_Mat)
{
#ifdef _OPENMP
#pragma omp parallel for
   for (int x=0; x<i_NbElem; x++) po_Buffer[x] -= pro_Mat.po_Buffer[x];
#else
   PARAM_TYPE* buf = &po_Buffer[0];
   PARAM_TYPE* otherBuf = &pro_Mat.po_Buffer[0];
   int N = i_NbElem;
   while (N--) *buf++ -= *otherBuf++;
#endif
   return (*this);
}
//------------------------------------------------------------------------------
// operator /=
// Element-wise division; divisors within [-1e-7, 1e-7] yield 0 instead of
// dividing (guards against division by ~zero).
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
const to_array<PARAM_TYPE,ARRAY_TYPE>& to_array<PARAM_TYPE,ARRAY_TYPE>::operator /= (const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_Mat)
{
   for (int x=0; x<i_NbElem; x++)
      if ((pro_Mat.po_Buffer[x] > 1e-07) || (pro_Mat.po_Buffer[x] < -1e-07))
         po_Buffer[x] /= pro_Mat.po_Buffer[x];
      else po_Buffer[x]=0;
   return (*this);
}
//------------------------------------------------------------------------------
// operator ^
// In-place power: every element is raised to pf_coef (via pow on double).
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
const
// !!!!! convert function from PARAM_TYPE to double must exist
to_array<PARAM_TYPE,ARRAY_TYPE>& to_array<PARAM_TYPE,ARRAY_TYPE>::operator ^ (const double pf_coef)
{
   for (int i=0; i<i_NbElem; i++)
      po_Buffer[i] = (PARAM_TYPE) pow ((double)po_Buffer[i], pf_coef);
   return (*this);
}
//------------------------------------------------------------------------------
// info (string Name)
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
// !!!!! PARAM_TYPE must accept operator << !!!!!!!!!!!
void to_array<PARAM_TYPE,ARRAY_TYPE>::info(string Name)
{
   // Header: the array name when one was given, a bare colon otherwise.
   if (Name == "") cout << " :";
   else cout << " " << Name;
   // One-line statistics summary of the whole buffer.
   cout << ", mean = " << mean() << ", sigma = " << sigma();
   cout << ", min = " << min() << ", max = " << max() << endl;
}
//------------------------------------------------------------------------------
// display (int pi_NbElem)
// pi_NbElem == 0 : print only the geometry (nx, ny, nz, naxis).
// pi_NbElem  > 0 : print the statistics line followed by the first
//                  pi_NbElem elements (clamped to the stored count).
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
// !!!!! PARAM_TYPE must accept operator << !!!!!!!!!!!
void to_array<PARAM_TYPE,ARRAY_TYPE>::display (int pi_NbElem)
{
   if (pi_NbElem == 0)
   {
      cout <<" nx="<<pto_TabNaxis[0]<<", ny="<<pto_TabNaxis[1]<<
             ", nz="<<pto_TabNaxis[2]<<", naxis="<<i_NbAxis<<endl;
      return;
   }
   // Never print more elements than the buffer holds.
   int ai_Show = (pi_NbElem > i_NbElem) ? i_NbElem : pi_NbElem;
   info();
   cout << " ";
   for (int k=0; k < ai_Show; k++) cout << po_Buffer[k] << " " ;
   cout << endl;
}
//------------------------------------------------------------------------------
// rampgen()
// Fills the buffer with a ramp: element k receives the value k.
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
// !!!!! convert function from int to PARAM_TYPE must exist
void to_array<PARAM_TYPE,ARRAY_TYPE>::rampgen()
{
   int k = 0;
   while (k < i_NbElem)
   {
      po_Buffer[k] = (PARAM_TYPE)k;
      k++;
   }
}
//------------------------------------------------------------------------------
// line() / column() : row and column extraction, currently not implemented.
//------------------------------------------------------------------------------
//template <class PARAM_TYPE, bool ARRAY_TYPE>
//to_array<PARAM_TYPE,ARRAY_TYPE> to_array<PARAM_TYPE,ARRAY_TYPE>::line (int i) {
//}
//template <class PARAM_TYPE, bool ARRAY_TYPE>
//to_array<PARAM_TYPE,ARRAY_TYPE> to_array<PARAM_TYPE,ARRAY_TYPE>::column (int j) {
//}
//------------------------------------------------------------------------------
// sup_threshold (float ThresholLevel)
// Clips from above: every element greater than the level is replaced by it.
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
void to_array<PARAM_TYPE,ARRAY_TYPE>::sup_threshold (float ThresholLevel)
{
   for (int k=0; k<i_NbElem; k++)
   {
      if ((PARAM_TYPE)po_Buffer[k] > ThresholLevel)
         po_Buffer[k] = (PARAM_TYPE)ThresholLevel;
   }
}
//------------------------------------------------------------------------------
// inf_threshold (float ThresholLevel)
// Clips from below: every element smaller than the level is replaced by it.
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
void to_array<PARAM_TYPE,ARRAY_TYPE>::inf_threshold (float ThresholLevel)
{
   for (int k=0; k<i_NbElem; k++)
   {
      if ((PARAM_TYPE)po_Buffer[k] < ThresholLevel)
         po_Buffer[k] = (PARAM_TYPE)ThresholLevel;
   }
}
//------------------------------------------------------------------------------
// min ()
// Smallest element; delegates to min(int&) and discards the index.
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
PARAM_TYPE to_array<PARAM_TYPE,ARRAY_TYPE>::min ()
{
   int ai_DummyPos = 0;
   return min (ai_DummyPos);
}
//------------------------------------------------------------------------------ // min (int& pri_ind) //------------------------------------------------------------------------------ template <class PARAM_TYPE, bool ARRAY_TYPE> PARAM_TYPE to_array<PARAM_TYPE,ARRAY_TYPE>::min (int& pri_ind) { PARAM_TYPE ao_prov=po_Buffer[0]; pri_ind=0; for (int i=1; i<i_NbElem; i++) if (ao_prov>po_Buffer[i]) { ao_prov=po_Buffer[i]; pri_ind = i; } return ao_prov; } //------------------------------------------------------------------------------ // max () //------------------------------------------------------------------------------ template <class PARAM_TYPE, bool ARRAY_TYPE> PARAM_TYPE to_array<PARAM_TYPE,ARRAY_TYPE>::max () { int ai_temp=0; return (max (ai_temp)); } //------------------------------------------------------------------------------ // max (int& pri_ind) //------------------------------------------------------------------------------ template <class PARAM_TYPE, bool ARRAY_TYPE> //!!!!!!!!!! PARAM_TYPE must define operator <... PARAM_TYPE to_array<PARAM_TYPE,ARRAY_TYPE>::max (int& pri_ind) { PARAM_TYPE ao_prov=po_Buffer[0]; pri_ind=0; for (int i=1; i<i_NbElem; i++) if (ao_prov<po_Buffer[i]) { ao_prov=po_Buffer[i]; pri_ind = i; } return ao_prov; } //------------------------------------------------------------------------------ // maxfabs () //------------------------------------------------------------------------------ template <class PARAM_TYPE, bool ARRAY_TYPE> PARAM_TYPE to_array<PARAM_TYPE,ARRAY_TYPE>::maxfabs () { int ai_temp=0; return (maxfabs (ai_temp)); } //------------------------------------------------------------------------------ // maxfabs (int& pri_ind) //------------------------------------------------------------------------------ template <class PARAM_TYPE, bool ARRAY_TYPE> //!!!!!!!!!! PARAM_TYPE must define operator <... 
// Returns the element of largest absolute value (signed value, not its
// magnitude); pri_ind receives its index. Starts from 0, so an all-zero
// buffer returns 0 with pri_ind == 0.
PARAM_TYPE to_array<PARAM_TYPE,ARRAY_TYPE>::maxfabs (int& pri_ind)
{
   PARAM_TYPE ao_prov=0;
   pri_ind=0;
   for (int i=0; i<i_NbElem; i++)
      if (fabs(ao_prov)<fabs(po_Buffer[i]))
      {
         ao_prov=po_Buffer[i];
         pri_ind = i;
      }
   return ao_prov;
}
//------------------------------------------------------------------------------
// total ()
// Sum of all elements, accumulated in double.
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
// !!!!! convert function from PARAM_TYPE to double must exist
double to_array<PARAM_TYPE,ARRAY_TYPE>::total () const
{
   double ao_prov= 0.;
   for (int i=0; i<i_NbElem; i++) ao_prov += po_Buffer[i];
   return ao_prov;
}
//------------------------------------------------------------------------------
// energy ()
// Sum of squared elements.
// NOTE(review): the accumulator is PARAM_TYPE, not double, so integer types
// can overflow and float loses precision before the final conversion —
// confirm whether accumulating in double was intended (as in total()).
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
// !!!!! convert function from PARAM_TYPE to double must exist
double to_array<PARAM_TYPE,ARRAY_TYPE>::energy () const
{
   PARAM_TYPE ao_prov=(PARAM_TYPE)0;
   for (int i=0; i<i_NbElem; i++) ao_prov += po_Buffer[i]*po_Buffer[i];
   return ao_prov;
}
//------------------------------------------------------------------------------
// sigma ()
// Standard deviation around the mean; variances below 1e-7 are reported as 0.
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
// !!!!! convert function from PARAM_TYPE to double must exist
double to_array<PARAM_TYPE,ARRAY_TYPE>::sigma () const
{
   double ao_moy = mean();
   double ad_sigma=0., ad_val=0;
   for (int i=0; i<i_NbElem; i++)
   {
      ad_val = po_Buffer[i] - ao_moy;
      ad_sigma += ad_val*ad_val;
   }
   // Divide-and-test in one step: ad_sigma becomes the variance here.
   if ((ad_sigma /= i_NbElem) > 1e-07) ad_sigma = sqrt (ad_sigma);
   else ad_sigma = 0.;
   return ad_sigma;
}
//------------------------------------------------------------------------------
// mean ()
// Arithmetic mean.
// NOTE(review): divides by i_NbElem with no guard — an empty array divides
// by zero; confirm callers never call this on an empty object.
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
// !!!!! convert function from PARAM_TYPE to double must exist
double to_array<PARAM_TYPE,ARRAY_TYPE>::mean () const
{
   return (double(total())/i_NbElem);
}
//------------------------------------------------------------------------------
// sigma_clip (float& pf_Mean, float &pf_Sigma, int pi_Nit)
// Iterative 3-sigma clipping: on each pass only elements within 3*sigma of
// the current mean contribute to the next mean/sigma estimate (the first
// pass uses all elements). pf_Sigma is only updated when the variance
// exceeds 1e-7.
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
// !!!!! convert function from PARAM_TYPE to double must exist
void to_array<PARAM_TYPE,ARRAY_TYPE>::sigma_clip (float& pf_Mean, float &pf_Sigma, int pi_Nit) const
{
   double ad_s0, ad_s1, ad_s2, ad_sm=0., ad_inter;
   PARAM_TYPE ao_val;
   pf_Mean = 0.;
   for (int it=0; it<pi_Nit; it++)
   {
      // ad_s0 = count, ad_s1 = sum, ad_s2 = sum of squares of kept elements.
      ad_s0=ad_s1=ad_s2=0.;
      for (int i=0; i<i_NbElem; i++)
      {
         ao_val = po_Buffer[i];
         if ((it==0) || (fabs(double(ao_val)-pf_Mean) < ad_sm))
         {
            ad_s0++;
            ad_s1 += double(ao_val);
            ad_s2 += double(ao_val)*double(ao_val);
         }
      }
      pf_Mean = ad_s1/ad_s0;
      ad_inter = ad_s2/ad_s0 - pf_Mean*pf_Mean;
      if (ad_inter > 1e-7) pf_Sigma = sqrt (ad_inter);
      // Clipping window for the next iteration: 3 sigma.
      ad_sm = 3. * pf_Sigma;
   }
}
//------------------------------------------------------------------------------
// sigma_clip (int pi_Nit)
// Convenience wrapper returning only the clipped sigma.
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
float to_array<PARAM_TYPE,ARRAY_TYPE>::sigma_clip (int pi_Nit) const
{
   float af_Mean=0., af_Sigma=0.;
   sigma_clip (af_Mean, af_Sigma, pi_Nit);
   return (af_Sigma);
}
//------------------------------------------------------------------------------
// to_array (to_array&)
// Copy constructor: allocates the same geometry (axis order swapped for the
// image flavour, matching init(const to_array&)) and deep-copies elements.
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
to_array<PARAM_TYPE,ARRAY_TYPE>::to_array (const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_Obj)
{
   i_NbElem=0;
   if (ARRAY_TYPE==true)
   {
      alloc (pro_Obj.nx(), pro_Obj.ny(), pro_Obj.nz());
   }
   else
   {
      alloc (pro_Obj.ny(), pro_Obj.nx(), pro_Obj.nz());
   }
   for (int i=0;i<n_elem();i++) po_Buffer[i]=pro_Obj.po_Buffer[i];
}
//------------------------------------------------------------------------------
// set_attrib (int pi_Nit)
// Resets every member to the empty, unallocated default state; the default
// border policy is I_CONT (clamp).
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
void to_array<PARAM_TYPE,ARRAY_TYPE>::set_attrib ()
{
   po_Buffer = (PARAM_TYPE*)NULL;
   i_NbElem = 0;
   i_NbAxis = 0;
   for (int i=0;i<MAX_NBR_AXIS;i++) pto_TabNaxis[i]=0;
   //tc_NameArray[0]='\0';
   //o_NameArray = "";
   e_UseClassMemAlloc = false;
   e_GetBuffer = false;
   is_ima=false;
   JunkVar=0;
   Border = I_CONT;
   test_index_function = test_index_cont;
}
//------------------------------------------------------------------------------
// operator + (to_array, to_array)
// Element-wise sum into a new object shaped like pro_obj1.
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
to_array<PARAM_TYPE,ARRAY_TYPE> operator + (const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_obj1, const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_obj2)
{
   //to_array<PARAM_TYPE,ARRAY_TYPE>* apo_array = new to_array<PARAM_TYPE,ARRAY_TYPE>;
   to_array<PARAM_TYPE,ARRAY_TYPE> ao_array;
   //apo_array->init(pro_obj1);
   ao_array.init(pro_obj1);
   for (int i=0; i<pro_obj1.n_elem(); i++)
      ao_array(i) = pro_obj1(i) + pro_obj2(i);
      //(*apo_array)(i) = pro_obj1(i) + pro_obj2(i);
   return (to_array<PARAM_TYPE,ARRAY_TYPE>(ao_array));
}
//------------------------------------------------------------------------------
// operator - (to_array, to_array)
// Element-wise difference into a new object shaped like pro_obj1.
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
to_array<PARAM_TYPE,ARRAY_TYPE> operator - (const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_obj1, const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_obj2)
{
   to_array<PARAM_TYPE,ARRAY_TYPE> ao_array;
   ao_array.init(pro_obj1);
   for (int i=0; i<pro_obj1.n_elem(); i++)
      ao_array(i) = pro_obj1(i) - pro_obj2(i);
   return (to_array<PARAM_TYPE,ARRAY_TYPE>(ao_array));
}
//------------------------------------------------------------------------------
// operator * (to_array, to_array)
// Element-wise product into a new object shaped like pro_obj1.
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
to_array<PARAM_TYPE,ARRAY_TYPE> operator * (const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_obj1, const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_obj2)
{
   to_array<PARAM_TYPE,ARRAY_TYPE> ao_array;
   ao_array.init(pro_obj1);
   for (int i=0; i<pro_obj1.n_elem(); i++)
      ao_array(i) = pro_obj1(i) * pro_obj2(i);
   return (to_array<PARAM_TYPE,ARRAY_TYPE>(ao_array));
}
//------------------------------------------------------------------------------
// operator / (to_array, to_array)
// Element-wise division with a near-zero divisor guard (body continues
// beyond this chunk).
//------------------------------------------------------------------------------
template <class PARAM_TYPE, bool ARRAY_TYPE>
to_array<PARAM_TYPE,ARRAY_TYPE> operator / (const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_obj1, const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_obj2)
{
   to_array<PARAM_TYPE,ARRAY_TYPE> ao_array;
   ao_array.init(pro_obj1);
   for (int i=0; i<pro_obj1.n_elem(); i++)
      if ((pro_obj2(i) > 1e-07) || (pro_obj2(i) < -1e-07))
ao_array(i) = pro_obj1(i) / pro_obj2(i); else ao_array(i) = 0; return (to_array<PARAM_TYPE,ARRAY_TYPE>(ao_array)); } //------------------------------------------------------------------------------ // operator * (double , to_array) //------------------------------------------------------------------------------ template <class PARAM_TYPE, bool ARRAY_TYPE> to_array<PARAM_TYPE,ARRAY_TYPE> operator * (const double mult_coeff, const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_obj) { to_array<PARAM_TYPE,ARRAY_TYPE> ao_array; ao_array.init(pro_obj); for (int i=0; i<pro_obj.nx(); i++) ao_array(i)=mult_coeff*pro_obj(i); return (to_array<PARAM_TYPE,ARRAY_TYPE>(ao_array)); } //------------------------------------------------------------------------------ // operator / (to_array,double) //------------------------------------------------------------------------------ template <class PARAM_TYPE, bool ARRAY_TYPE> to_array<PARAM_TYPE,ARRAY_TYPE> operator / (const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_obj, const double div_coeff) { to_array<PARAM_TYPE,ARRAY_TYPE> ao_array; ao_array.init(pro_obj); for (int i=0; i<pro_obj.nx(); i++) ao_array(i)=pro_obj(i)/div_coeff; return (to_array<PARAM_TYPE,ARRAY_TYPE>(ao_array)); } //------------------------------------------------------------------------------ // operator > (to_array,double) //------------------------------------------------------------------------------ template <class PARAM_TYPE, bool ARRAY_TYPE> to_array<PARAM_TYPE,ARRAY_TYPE> operator > (const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_obj, const double bound_coeff) { to_array<PARAM_TYPE,ARRAY_TYPE> ao_array; ao_array.init(pro_obj); for (long int i=0; i<pro_obj.nx(); i++) if(pro_obj(i) > bound_coeff) ao_array(i)=1.0; else ao_array(i)=0.0; return (to_array<PARAM_TYPE,ARRAY_TYPE>(ao_array)); } //------------------------------------------------------------------------------ // operator < (to_array,double) //------------------------------------------------------------------------------ 
// Element-wise threshold test: result(i) is 1.0 where pro_obj(i) < bound_coeff,
// 0.0 otherwise (a binary mask with the same geometry as the input).
template <class PARAM_TYPE, bool ARRAY_TYPE>
to_array<PARAM_TYPE,ARRAY_TYPE> operator < (const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_obj,
                                            const double bound_coeff)
{
  to_array<PARAM_TYPE,ARRAY_TYPE> ao_array;
  ao_array.init(pro_obj);
  // BUG FIX: the loop previously ran to nx() only, leaving every element
  // past the first axis untouched for multi-dimensional arrays; it now
  // covers all n_elem() elements, consistent with the element-wise
  // operators defined earlier in this file.
  for (long int i=0; i<pro_obj.n_elem(); i++)
    if (pro_obj(i) < bound_coeff) ao_array(i)=1.0;
    else                          ao_array(i)=0.0;
  return (to_array<PARAM_TYPE,ARRAY_TYPE>(ao_array));
}

//------------------------------------------------------------------------------
// mult (to_array,to_array)
//------------------------------------------------------------------------------
// Linear-algebra product: matrix*matrix when pro_obj2 is 2-D (ny()>0),
// matrix*vector otherwise. Aborts the process on incompatible dimensions.
template <class PARAM_TYPE, bool ARRAY_TYPE>
to_array<PARAM_TYPE,ARRAY_TYPE> mult (const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_obj1,
                                      const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_obj2)
{
  if (pro_obj1.ny() != pro_obj2.nx())
  {
    printf("Can't multiply: 1st matrix number of columns different from 2nd matrix number of rows. \n");
    exit(-1);
  }
  to_array<PARAM_TYPE,ARRAY_TYPE> ao_array;
  // NOTE(review): the accumulations below assume alloc() zero-initializes
  // the buffer -- TODO confirm against the to_array class definition.
  if (pro_obj2.ny()>0)
  {
    // matrix * matrix
    ao_array.alloc(pro_obj1.nx(),pro_obj2.ny());
    for (int i=0; i<pro_obj1.nx(); i++)
    {
      for (int j=0; j<pro_obj2.ny(); j++)
      {
        for (int k=0; k<pro_obj1.ny(); k++)
        {
          ao_array(i,j) += pro_obj1(i,k) * pro_obj2(k,j);
        }
      }
    }
  }
  else
  {
    // matrix * vector
    ao_array.alloc(pro_obj1.nx());
    for (int i=0; i<pro_obj1.nx(); i++)
    {
      for (int k=0; k<pro_obj1.ny(); k++)
        ao_array(i) += pro_obj1(i,k) * pro_obj2(k);
    }
  }
  return (to_array<PARAM_TYPE,ARRAY_TYPE>(ao_array));
}

//------------------------------------------------------------------------------
// transpose (to_array)
//------------------------------------------------------------------------------
// Returns the matrix transpose: result(j,i) = pro_obj(i,j).
template <class PARAM_TYPE, bool ARRAY_TYPE>
to_array<PARAM_TYPE,ARRAY_TYPE> transpose (const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_obj)
{
  to_array<PARAM_TYPE,ARRAY_TYPE> ao_array;
  ao_array.alloc(pro_obj.ny(),pro_obj.nx());
  for(int i=0;i<pro_obj.nx();i++)
  {
    for(int j=0;j<pro_obj.ny();j++)
    {
      ao_array(j,i)=pro_obj(i,j);
    }
  }
  return (to_array<PARAM_TYPE,ARRAY_TYPE>(ao_array));
}
//------------------------------------------------------------------------------
// invert (to_array)
//------------------------------------------------------------------------------
// Inverse of a 2x2 matrix via the closed-form adjugate/determinant formula.
// Aborts the process if the input is not 2x2.
// NOTE(review): no singularity check -- if det == 0 the divisions below
// produce inf/NaN; confirm callers guarantee a non-singular matrix.
template <class PARAM_TYPE, bool ARRAY_TYPE>
to_array<PARAM_TYPE,ARRAY_TYPE> invert (const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_obj)
{
  if(pro_obj.nx() !=2 || pro_obj.ny()!=2)
  {
    printf("Matrix must be 2x2 to be inverted. \n");
    exit(-1);
  }
  to_array<PARAM_TYPE,ARRAY_TYPE> ao_array;
  ao_array.alloc(2,2);
  double det=pro_obj(0,0)*pro_obj(1,1)-pro_obj(0,1)*pro_obj(1,0);
  // adjugate divided by the determinant
  ao_array(0,0)=1/det*pro_obj(1,1);
  ao_array(1,1)=1/det*pro_obj(0,0);
  ao_array(0,1)=-1/det*pro_obj(0,1);
  ao_array(1,0)=-1/det*pro_obj(1,0);
  return (to_array<PARAM_TYPE,ARRAY_TYPE>(ao_array));
}

//------------------------------------------------------------------------------
// mult (to_array,to_array, int)
//------------------------------------------------------------------------------
// Same product as mult(a,b) above, but reads the first operand from plane
// index_z of a 3-D array: result = pro_obj1(.,.,index_z) * pro_obj2.
// NOTE(review): an out-of-range index_z only prints a warning and carries
// on -- unlike the dimension check, it does not exit; confirm intended.
template <class PARAM_TYPE, bool ARRAY_TYPE>
to_array<PARAM_TYPE,ARRAY_TYPE> mult (const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_obj1,
                                      const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_obj2,
                                      int index_z)
{
  if(index_z >= pro_obj1.nz())
    printf("Wrong z index. \n");
  if(pro_obj1.ny() != pro_obj2.nx())
  {
    printf("Can't multiply: 1st matrix number of columns different from 2nd matrix number of rows. \n");
    exit(-1);
  }
  to_array<PARAM_TYPE,ARRAY_TYPE> ao_array;
  // Accumulations assume alloc() zero-initializes the buffer -- TODO confirm.
  if(pro_obj2.ny()>0)
  {
    // matrix (plane index_z) * matrix
    ao_array.alloc(pro_obj1.nx(),pro_obj2.ny());
    for (int i=0; i<pro_obj1.nx(); i++)
    {
      for (int j=0; j<pro_obj2.ny(); j++)
      {
        for (int k=0; k<pro_obj1.ny(); k++)
        {
          ao_array(i,j) += pro_obj1(i,k,index_z) * pro_obj2(k,j);
        }
      }
    }
  }
  else
  {
    // matrix (plane index_z) * vector
    ao_array.alloc(pro_obj1.nx());
    for (int i=0; i<pro_obj1.nx(); i++)
    {
      for (int k=0; k<pro_obj1.ny(); k++)
        ao_array(i) += pro_obj1(i,k,index_z) * pro_obj2(k);
    }
  }
  return (to_array<PARAM_TYPE,ARRAY_TYPE>(ao_array));
}

//------------------------------------------------------------------------------
// log (to_array)
//------------------------------------------------------------------------------
// Element-wise natural logarithm of a (up to 3-D) array.
// NOTE(review): the inner log(...) call on a scalar is presumably resolved
// to the math-library log, not this template -- confirm no self-recursion.
template <class PARAM_TYPE, bool ARRAY_TYPE>
to_array<PARAM_TYPE,ARRAY_TYPE> log (const to_array<PARAM_TYPE,ARRAY_TYPE>& pro_obj)
{
  to_array<PARAM_TYPE,ARRAY_TYPE> ao_array;
  ao_array.alloc(pro_obj.nx(),pro_obj.ny(),pro_obj.nz());
  for(int i=0;i<pro_obj.nx();i++)
    for(int j=0;j<pro_obj.ny();j++)
      for(int k=0;k<pro_obj.nz();k++)
        ao_array(i,j,k)=log(pro_obj(i,j,k));
  return (to_array<PARAM_TYPE,ARRAY_TYPE>(ao_array));
}

#endif
// Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/ASTConcept.h" #include "clang/AST/ASTFwd.h" #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprConcepts.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExprOpenMP.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TypeLoc.h" #include "clang/APINotes/APINotesManager.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/Builtins.h" #include "clang/Basic/DarwinSDKInfo.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenCLOptions.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include 
"clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/SemaConcept.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include <deque> #include <functional> #include <memory> #include <string> #include <tuple> #include <vector> namespace llvm { class APSInt; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class 
FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; enum class OverloadCandidateParamOrder : char; enum OverloadCandidateRewriteKind : unsigned; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class 
VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. 
if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Tracks expected type during expression parsing, for use in code completion. /// The type is tied to a particular token, all functions that update or consume /// the type take a start location of the token they are looking at as a /// parameter. This avoids updating the type on hot paths in the parser. class PreferredTypeBuilder { public: PreferredTypeBuilder(bool Enabled) : Enabled(Enabled) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Handles e.g. BaseType{ .D = Tok... void enterDesignatedInitializer(SourceLocation Tok, QualType BaseType, const Designation &D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this funciton, as it stores a /// function_ref, clients should make sure all calls to get() with the same /// location happen while function_ref is alive. /// /// The callback should also emit signature help as a side-effect, but only /// if the completion point has been reached. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); /// Get the expected type associated with this location, if any. 
/// /// If the location is a function argument, determining the expected type /// involves considering all function overloads and the arguments so far. /// In this case, signature help for these function overloads will be reported /// as a side-effect (only if the completion point has been reached). QualType get(SourceLocation Tok) const { if (!Enabled || Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: bool Enabled; /// Start position of a token for which we store expected type. SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema final { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. 
if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: /// The maximum alignment, same as in llvm::Value. We duplicate them here /// because that allows us not to duplicate the constants in clang code, /// which we must to since we can't directly use the llvm constants. /// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp /// /// This is the greatest alignment value supported by load, store, and alloca /// instructions, and global values. static const unsigned MaxAlignmentExponent = 32; static const uint64_t MaximumAlignment = 1ull << MaxAlignmentExponent; typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions CurFPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; api_notes::APINotesManager APINotes; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. 
LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails. llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4, PCSK_Relro = 5 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangRelroSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; // #pragma pack and align. class AlignPackInfo { public: // `Native` represents default align mode, which may vary based on the // platform. 
enum Mode : unsigned char { Native, Natural, Packed, Mac68k }; // #pragma pack info constructor AlignPackInfo(AlignPackInfo::Mode M, unsigned Num, bool IsXL) : PackAttr(true), AlignMode(M), PackNumber(Num), XLStack(IsXL) { assert(Num == PackNumber && "The pack number has been truncated."); } // #pragma align info constructor AlignPackInfo(AlignPackInfo::Mode M, bool IsXL) : PackAttr(false), AlignMode(M), PackNumber(M == Packed ? 1 : UninitPackVal), XLStack(IsXL) {} explicit AlignPackInfo(bool IsXL) : AlignPackInfo(Native, IsXL) {} AlignPackInfo() : AlignPackInfo(Native, false) {} // When a AlignPackInfo itself cannot be used, this returns an 32-bit // integer encoding for it. This should only be passed to // AlignPackInfo::getFromRawEncoding, it should not be inspected directly. static uint32_t getRawEncoding(const AlignPackInfo &Info) { std::uint32_t Encoding{}; if (Info.IsXLStack()) Encoding |= IsXLMask; Encoding |= static_cast<uint32_t>(Info.getAlignMode()) << 1; if (Info.IsPackAttr()) Encoding |= PackAttrMask; Encoding |= static_cast<uint32_t>(Info.getPackNumber()) << 4; return Encoding; } static AlignPackInfo getFromRawEncoding(unsigned Encoding) { bool IsXL = static_cast<bool>(Encoding & IsXLMask); AlignPackInfo::Mode M = static_cast<AlignPackInfo::Mode>((Encoding & AlignModeMask) >> 1); int PackNumber = (Encoding & PackNumMask) >> 4; if (Encoding & PackAttrMask) return AlignPackInfo(M, PackNumber, IsXL); return AlignPackInfo(M, IsXL); } bool IsPackAttr() const { return PackAttr; } bool IsAlignAttr() const { return !PackAttr; } Mode getAlignMode() const { return AlignMode; } unsigned getPackNumber() const { return PackNumber; } bool IsPackSet() const { // #pragma align, #pragma pack(), and #pragma pack(0) do not set the pack // attriute on a decl. 
return PackNumber != UninitPackVal && PackNumber != 0; } bool IsXLStack() const { return XLStack; } bool operator==(const AlignPackInfo &Info) const { return std::tie(AlignMode, PackNumber, PackAttr, XLStack) == std::tie(Info.AlignMode, Info.PackNumber, Info.PackAttr, Info.XLStack); } bool operator!=(const AlignPackInfo &Info) const { return !(*this == Info); } private: /// \brief True if this is a pragma pack attribute, /// not a pragma align attribute. bool PackAttr; /// \brief The alignment mode that is in effect. Mode AlignMode; /// \brief The pack number of the stack. unsigned char PackNumber; /// \brief True if it is a XL #pragma align/pack stack. bool XLStack; /// \brief Uninitialized pack value. static constexpr unsigned char UninitPackVal = -1; // Masks to encode and decode an AlignPackInfo. static constexpr uint32_t IsXLMask{0x0000'0001}; static constexpr uint32_t AlignModeMask{0x0000'0006}; static constexpr uint32_t PackAttrMask{0x00000'0008}; static constexpr uint32_t PackNumMask{0x0000'01F0}; }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value) { if (Action == PSK_Reset) { CurrentValue = DefaultValue; CurrentPragmaLocation = PragmaLocation; return; } if (Action & PSK_Push) Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation, PragmaLocation); else if (Action & PSK_Pop) { if (!StackSlotLabel.empty()) { // If we've got a label, try to find it and jump there. 
auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) { return x.StackSlotLabel == StackSlotLabel; }); // If we found the label so pop from there. if (I != Stack.rend()) { CurrentValue = I->Value; CurrentPragmaLocation = I->PragmaLocation; Stack.erase(std::prev(I.base()), Stack.end()); } } else if (!Stack.empty()) { // We do not have a label, just pop the last entry. CurrentValue = Stack.back().Value; CurrentPragmaLocation = Stack.back().PragmaLocation; Stack.pop_back(); } } if (Action & PSK_Set) { CurrentValue = Value; CurrentPragmaLocation = PragmaLocation; } } // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. 
Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispMode> VtorDispStack; PragmaStack<AlignPackInfo> AlignPackStack; // The current #pragma align/pack values and locations at each #include. struct AlignPackIncludeState { AlignPackInfo CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<AlignPackIncludeState, 8> AlignPackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // This stack tracks the current state of Sema.CurFPFeatures. PragmaStack<FPOptionsOverride> FpPragmaStack; FPOptionsOverride CurFPFeatureOverrides() { FPOptionsOverride result; if (!FpPragmaStack.hasValue()) { result = FPOptionsOverride(); } else { result = FpPragmaStack.CurrentValue; } return result; } // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. 
struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. 
using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>,
                                           llvm::SmallPtrSet<Expr *, 4>>;
MaybeODRUseExprSet MaybeODRUseExprs;

std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;

/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;

/// The index of the first FunctionScope that corresponds to the current
/// context.
unsigned FunctionScopesStart = 0;

/// Returns only the function scopes that belong to the current context
/// (i.e. the tail of FunctionScopes starting at FunctionScopesStart).
ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const {
  return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart,
                            FunctionScopes.end());
}

/// Stack containing information needed when in C++2a an 'auto' is encountered
/// in a function declaration parameter type specifier in order to invent a
/// corresponding template parameter in the enclosing abbreviated function
/// template. This information is also present in LambdaScopeInfo, stored in
/// the FunctionScopes stack.
SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;

/// The index of the first InventedParameterInfo that refers to the current
/// context.
unsigned InventedParameterInfosStart = 0;

/// Returns only the invented-parameter records that belong to the current
/// context (tail starting at InventedParameterInfosStart).
ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
  return llvm::makeArrayRef(InventedParameterInfos.begin() +
                                InventedParameterInfosStart,
                            InventedParameterInfos.end());
}

typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
                   &ExternalSemaSource::ReadExtVectorDecls, 2, 2>
    ExtVectorDeclsType;

/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;

/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;

typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;

/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;

/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
    UnusedLocalTypedefNameCandidates;

/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;

typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;

/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;

/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;

/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);

typedef LazyVector<VarDecl *, ExternalSemaSource,
                   &ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
    TentativeDefinitionsType;

/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;

/// All the external declarations encountered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;

typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
                   &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
    UnusedFileScopedDeclsType;

/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used.
/// Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;

typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
                   &ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
    DelegatingCtorDeclsType;

/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;

/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
    DelayedOverridingExceptionSpecChecks;

/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
    DelayedEquivalentExceptionSpecChecks;

typedef llvm::MapVector<const FunctionDecl *,
                        std::unique_ptr<LateParsedTemplate>>
    LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;

/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;

/// Install the parser callbacks (and the opaque parser handle passed back
/// to them) used to parse late-parsed templates on demand.
void SetLateTemplateParser(LateTemplateParserCB *LTP,
                           LateTemplateParserCleanupCB *LTPCleanup, void *P) {
  LateTemplateParser = LTP;
  LateTemplateParserCleanup = LTPCleanup;
  OpaqueParser = P;
}

/// \brief Callback to the parser to parse a type expressed as a string.
std::function<TypeResult(StringRef, StringRef, SourceLocation)>
    ParseTypeFromStringCallback;

class DelayedDiagnostics;

class DelayedDiagnosticsState {
  sema::DelayedDiagnosticPool *SavedPool;
  friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;

/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
  /// The current pool of diagnostics into which delayed
  /// diagnostics should go.
  sema::DelayedDiagnosticPool *CurPool;

public:
  DelayedDiagnostics() : CurPool(nullptr) {}

  /// Adds a delayed diagnostic.
  void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h

  /// Determines whether diagnostics should be delayed.
  bool shouldDelayDiagnostics() { return CurPool != nullptr; }

  /// Returns the current delayed-diagnostics pool.
  sema::DelayedDiagnosticPool *getCurrentPool() const {
    return CurPool;
  }

  /// Enter a new scope. Access and deprecation diagnostics will be
  /// collected in this pool.
  DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
    DelayedDiagnosticsState state;
    state.SavedPool = CurPool;
    CurPool = &pool;
    return state;
  }

  /// Leave a delayed-diagnostic state that was previously pushed.
  /// Do not emit any of the diagnostics. This is performed as part
  /// of the bookkeeping of popping a pool "properly".
  void popWithoutEmitting(DelayedDiagnosticsState state) {
    CurPool = state.SavedPool;
  }

  /// Enter a new scope where access and deprecation diagnostics are
  /// not delayed.
  DelayedDiagnosticsState pushUndelayed() {
    DelayedDiagnosticsState state;
    state.SavedPool = CurPool;
    CurPool = nullptr;
    return state;
  }

  /// Undo a previous pushUndelayed().
  void popUndelayed(DelayedDiagnosticsState state) {
    assert(CurPool == nullptr);
    CurPool = state.SavedPool;
  }
} DelayedDiagnostics;

/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
  Sema &S;
  DeclContext *SavedContext;
  ProcessingContextState SavedContextState;
  QualType SavedCXXThisTypeOverride;
  unsigned SavedFunctionScopesStart;
  unsigned SavedInventedParameterInfosStart;

public:
  ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
      : S(S), SavedContext(S.CurContext),
        SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
        SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
        SavedFunctionScopesStart(S.FunctionScopesStart),
        SavedInventedParameterInfosStart(S.InventedParameterInfosStart) {
    assert(ContextToPush && "pushing null context");
    S.CurContext = ContextToPush;
    if (NewThisContext)
      S.CXXThisTypeOverride = QualType();
    // Any saved FunctionScopes do not refer to this context.
    S.FunctionScopesStart = S.FunctionScopes.size();
    S.InventedParameterInfosStart = S.InventedParameterInfos.size();
  }

  /// Restore the saved context. Idempotent: a null SavedContext marks an
  /// already-popped object, so the destructor's call becomes a no-op.
  void pop() {
    if (!SavedContext)
      return;
    S.CurContext = SavedContext;
    S.DelayedDiagnostics.popUndelayed(SavedContextState);
    S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
    S.FunctionScopesStart = SavedFunctionScopesStart;
    S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
    SavedContext = nullptr;
  }

  ~ContextRAII() { pop(); }
};

/// Whether the AST is currently being rebuilt to correct immediate
/// invocations. Immediate invocation candidates and references to consteval
/// functions aren't tracked when this is set.
bool RebuildingImmediateInvocation = false;

/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;

bool isConstantEvaluated() {
  return ExprEvalContexts.back().isConstantEvaluated() ||
         isConstantEvaluatedOverride;
}

/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
  Sema &S;
  Sema::ContextRAII SavedContext;
  bool PushedCodeSynthesisContext = false;

public:
  SynthesizedFunctionScope(Sema &S, DeclContext *DC)
      : S(S), SavedContext(S, DC) {
    S.PushFunctionScope();
    S.PushExpressionEvaluationContext(
        Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
    if (auto *FD = dyn_cast<FunctionDecl>(DC))
      FD->setWillHaveBody(true);
    else
      assert(isa<ObjCMethodDecl>(DC));
  }

  /// Record a code-synthesis context note pointing at UseLoc so diagnostics
  /// emitted while synthesizing the body mention where it was required.
  /// May be called at most once per scope.
  void addContextNote(SourceLocation UseLoc) {
    assert(!PushedCodeSynthesisContext);

    Sema::CodeSynthesisContext Ctx;
    Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
    Ctx.PointOfInstantiation = UseLoc;
    Ctx.Entity = cast<Decl>(S.CurContext);
    S.pushCodeSynthesisContext(Ctx);

    PushedCodeSynthesisContext = true;
  }

  ~SynthesizedFunctionScope() {
    if (PushedCodeSynthesisContext)
      S.popCodeSynthesisContext();
    if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
      FD->setWillHaveBody(false);
    S.PopExpressionEvaluationContext();
    S.PopFunctionScopeInfo();
  }
};

/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;

/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;

/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();

/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;

IdentifierResolver IdResolver;

/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;

/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;

/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;

/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;

/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;

/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;

/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;

/// The namespace where coroutine components are defined. In standard,
/// they are defined in std namespace. And in the previous implementation,
/// they are defined in std::experimental namespace.
NamespaceDecl *CoroTraitsNamespaceCache;

/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;

/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;

/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;

/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;

/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;

/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;

/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;

/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];

/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;

/// Pointer to NSString type (NSString *).
QualType NSStringPointer;

/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;

/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;

/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;

/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;

/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;

/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;

/// id<NSCopying> type.
QualType QIDNSCopying;

/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;

/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;

/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
  /// The current expression and its subexpressions occur within an
  /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
  /// \c sizeof, where the type of the expression may be significant but
  /// no code will be generated to evaluate the value of the expression at
  /// run time.
  Unevaluated,

  /// The current expression occurs within a braced-init-list within
  /// an unevaluated operand. This is mostly like a regular unevaluated
  /// context, except that we still instantiate constexpr functions that are
  /// referenced here so that we can perform narrowing checks correctly.
  UnevaluatedList,

  /// The current expression occurs within a discarded statement.
  /// This behaves largely similarly to an unevaluated operand in preventing
  /// definitions from being required, but not in other ways.
  DiscardedStatement,

  /// The current expression occurs within an unevaluated
  /// operand that unconditionally permits abstract references to
  /// fields, such as a SIZE operator in MS-style inline assembly.
  UnevaluatedAbstract,

  /// The current context is "potentially evaluated" in C++11 terms,
  /// but the expression is evaluated at compile-time (like the values of
  /// cases in a switch statement).
  ConstantEvaluated,

  /// In addition of being constant evaluated, the current expression
  /// occurs in an immediate function context - either a consteval function
  /// or a consteval if function.
  ImmediateFunctionContext,

  /// The current expression is potentially evaluated at run time,
  /// which means that code may be generated to evaluate the value of the
  /// expression at run time.
  PotentiallyEvaluated,

  /// The current expression is potentially evaluated, but any
  /// declarations referenced inside that expression are only used if
  /// in fact the current expression is used.
  ///
  /// This value is used when parsing default function arguments, for which
  /// we would like to provide diagnostics (e.g., passing non-POD arguments
  /// through varargs) but do not want to mark declarations as "referenced"
  /// until the default argument is used.
  PotentiallyEvaluatedIfUsed
};

using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;

/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
  /// The expression evaluation context.
  ExpressionEvaluationContext Context;

  /// Whether the enclosing context needed a cleanup.
  CleanupInfo ParentCleanup;

  /// The number of active cleanup objects when we entered
  /// this expression evaluation context.
  unsigned NumCleanupObjects;

  /// The number of typos encountered during this expression evaluation
  /// context (i.e. the number of TypoExprs created).
  unsigned NumTypos;

  MaybeODRUseExprSet SavedMaybeODRUseExprs;

  /// The lambdas that are present within this context, if it
  /// is indeed an unevaluated context.
  SmallVector<LambdaExpr *, 2> Lambdas;

  /// The declaration that provides context for lambda expressions
  /// and block literals if the normal declaration context does not
  /// suffice, e.g., in a default function argument.
  Decl *ManglingContextDecl;

  /// If we are processing a decltype type, a set of call expressions
  /// for which we have deferred checking the completeness of the return type.
  SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

  /// If we are processing a decltype type, a set of temporary binding
  /// expressions for which we have deferred checking the destructor.
  SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

  llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

  /// Expressions appearing as the LHS of a volatile assignment in this
  /// context. We produce a warning for these when popping the context if
  /// they are not discarded-value expressions nor unevaluated operands.
  SmallVector<Expr*, 2> VolatileAssignmentLHSs;

  /// Set of candidates for starting an immediate invocation.
  llvm::SmallVector<ImmediateInvocationCandidate, 4>
      ImmediateInvocationCandidates;

  /// Set of DeclRefExprs referencing a consteval function when used in a
  /// context not already known to be immediately invoked.
  llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;

  /// \brief Describes whether we are in an expression context which we have
  /// to handle differently.
  enum ExpressionKind {
    EK_Decltype, EK_TemplateArgument, EK_Other
  } ExprContext;

  // A context can be nested in both a discarded statement context and
  // an immediate function context, so they need to be tracked independently.
  bool InDiscardedStatement;
  bool InImmediateFunctionContext;

  ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                    unsigned NumCleanupObjects,
                                    CleanupInfo ParentCleanup,
                                    Decl *ManglingContextDecl,
                                    ExpressionKind ExprContext)
      : Context(Context), ParentCleanup(ParentCleanup),
        NumCleanupObjects(NumCleanupObjects), NumTypos(0),
        ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext),
        InDiscardedStatement(false), InImmediateFunctionContext(false) {}

  bool isUnevaluated() const {
    return Context == ExpressionEvaluationContext::Unevaluated ||
           Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
           Context == ExpressionEvaluationContext::UnevaluatedList;
  }

  bool isConstantEvaluated() const {
    return Context == ExpressionEvaluationContext::ConstantEvaluated ||
           Context == ExpressionEvaluationContext::ImmediateFunctionContext;
  }

  bool isImmediateFunctionContext() const {
    return Context == ExpressionEvaluationContext::ImmediateFunctionContext ||
           (Context == ExpressionEvaluationContext::DiscardedStatement &&
            InImmediateFunctionContext);
  }

  bool isDiscardedStatementContext() const {
    return Context == ExpressionEvaluationContext::DiscardedStatement ||
           (Context == ExpressionEvaluationContext::ImmediateFunctionContext &&
            InDiscardedStatement);
  }
};

/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);

/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);

/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
/// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl *, 2> Pair; public: SpecialMemberOverloadResult() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. const TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. 
/// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; class GlobalMethodPool { public: using Lists = std::pair<ObjCMethodList, ObjCMethodList>; using iterator = llvm::DenseMap<Selector, Lists>::iterator; iterator begin() { return Methods.begin(); } iterator end() { return Methods.end(); } iterator find(Selector Sel) { return Methods.find(Sel); } std::pair<iterator, bool> insert(std::pair<Selector, Lists> &&Val) { return Methods.insert(Val); } int count(Selector Sel) const { return Methods.count(Sel); } bool empty() const { return Methods.empty(); } private: llvm::DenseMap<Selector, Lists> Methods; }; /// Method Pool - allows efficient lookup when typechecking messages to "id". 
/// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// Kinds of defaulted comparison operator functions. enum class DefaultedComparisonKind : unsigned char { /// This is not a defaultable comparison operator. None, /// This is an operator== that should be implemented as a series of /// subobject comparisons. Equal, /// This is an operator<=> that should be implemented as a series of /// subobject comparisons. ThreeWay, /// This is an operator!= that should be implemented as a rewrite in terms /// of a == comparison. NotEqual, /// This is an <, <=, >, or >= that should be implemented as a rewrite in /// terms of a <=> comparison. Relational, }; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. 
/// We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;

/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;

void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);

/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);

/// Records and restores the CurFPFeatures state on entry/exit of compound
/// statements.
class FPFeaturesStateRAII {
public:
  FPFeaturesStateRAII(Sema &S);
  ~FPFeaturesStateRAII();
  FPOptionsOverride getOverrides() { return OldOverrides; }

private:
  Sema& S;
  FPOptions OldFPFeaturesState;
  FPOptionsOverride OldOverrides;
  LangOptions::FPEvalMethodKind OldEvalMethod;
  SourceLocation OldFPPragmaLocation;
};

void addImplicitTypedef(StringRef Name, QualType T);

bool WarnedStackExhausted = false;

/// Increment when we find a reference; decrement when we find an ignored
/// assignment. Ultimately the value is 0 if every reference is an ignored
/// assignment.
llvm::DenseMap<const VarDecl *, int> RefsMinusAssignments;

private:
// Lazily-populated SDK info used for Darwin availability checking; the
// "missing" flag prevents repeating the warning once emitted.
Optional<std::unique_ptr<DarwinSDKInfo>> CachedDarwinSDKInfo;
bool WarnedDarwinSDKInfoMissing = false;

public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
     TranslationUnitKind TUKind = TU_Complete,
     CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();

/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize(); /// This virtual key function only exists to limit the emission of debug info /// describing the Sema class. GCC and Clang only emit debug info for a class /// with a vtable when the vtable is emitted. Sema is final and not /// polymorphic, but the debug info size savings are so significant that it is /// worth adding a vtable just to take advantage of this optimization. virtual void anchor(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getCurFPFeatures() { return CurFPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc, StringRef Platform); DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking(); ///Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// ///\param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// Warn that the stack is nearly exhausted. void warnStackExhausted(SourceLocation Loc); /// Run some code with "sufficient" stack space. (Currently, at least 256K is /// guaranteed). Produces a warning if we're low on stack space and allocates /// more in that case. Use this in code that may recurse deeply (for example, /// in template instantiation) to avoid stack overflow. void runWithSufficientStackSpace(SourceLocation Loc, llvm::function_ref<void()> Fn); /// Helper class that creates diagnostics with optional /// template instantiation stacks. 
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. ImmediateDiagBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class ImmediateDiagBuilder : public DiagnosticBuilder {
  Sema &SemaRef;
  unsigned DiagID;

public:
  ImmediateDiagBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
      : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}
  ImmediateDiagBuilder(DiagnosticBuilder &&DB, Sema &SemaRef, unsigned DiagID)
      : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}

  // This is a cunning lie. DiagnosticBuilder actually performs move
  // construction in its copy constructor (but due to varied uses, it's not
  // possible to conveniently express this as actual move construction). So
  // the default copy ctor here is fine, because the base class disables the
  // source anyway, so the user-defined ~ImmediateDiagBuilder is a safe no-op
  // in that case anyway.
  ImmediateDiagBuilder(const ImmediateDiagBuilder &) = default;

  ~ImmediateDiagBuilder() {
    // If we aren't active, there is nothing to do.
    if (!isActive()) return;

    // Otherwise, we need to emit the diagnostic. First clear the diagnostic
    // builder itself so it won't emit the diagnostic in its own destructor.
    //
    // This seems wasteful, in that as written the DiagnosticBuilder dtor will
    // do its own needless checks to see if the diagnostic needs to be
    // emitted. However, because we take care to ensure that the builder
    // objects never escape, a sufficiently smart compiler will be able to
    // eliminate that code.
    Clear();

    // Dispatch to Sema to emit the diagnostic.
    SemaRef.EmitCurrentDiagnostic(DiagID);
  }

  /// Teach operator<< to produce an object of the correct type.
  template <typename T>
  friend const ImmediateDiagBuilder &
  operator<<(const ImmediateDiagBuilder &Diag, const T &Value) {
    const DiagnosticBuilder &BaseDiag = Diag;
    BaseDiag << Value;
    return Diag;
  }

  // It is necessary to limit this to rvalue reference to avoid calling this
  // function with a bitfield lvalue argument since non-const reference to
  // bitfield is not allowed.
  template <typename T, typename = typename std::enable_if<
                            !std::is_lvalue_reference<T>::value>::type>
  const ImmediateDiagBuilder &operator<<(T &&V) const {
    const DiagnosticBuilder &BaseDiag = *this;
    BaseDiag << std::move(V);
    return *this;
  }
};

/// A generic diagnostic builder for errors which may or may not be deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class SemaDiagnosticBuilder {
public:
  enum Kind {
    /// Emit no diagnostics.
    K_Nop,
    /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
    K_Immediate,
    /// Emit the diagnostic immediately, and, if it's a warning or error, also
    /// emit a call stack showing how this function can be reached by an a
    /// priori known-emitted function.
    K_ImmediateWithCallStack,
    /// Create a deferred diagnostic, which is emitted only if the function
    /// it's attached to is codegen'ed. Also emit a call stack as with
    /// K_ImmediateWithCallStack.
    K_Deferred
  };

  SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                        FunctionDecl *Fn, Sema &S);
  SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D);
  SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default;
  ~SemaDiagnosticBuilder();

  bool isImmediate() const { return ImmediateDiag.hasValue(); }

  /// Convertible to bool: True if we immediately emitted an error, false if
  /// we didn't emit an error or we created a deferred error.
  ///
  /// Example usage:
  ///
  ///   if (SemaDiagnosticBuilder(...) << foo << bar)
  ///     return ExprError();
  ///
  /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
  /// want to use these instead of creating a SemaDiagnosticBuilder yourself.
  operator bool() const { return isImmediate(); }

  // Streams Value either into the immediate diagnostic or into the deferred
  // partial diagnostic stored on the function, depending on which Optional
  // is populated.
  template <typename T>
  friend const SemaDiagnosticBuilder &
  operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) {
    if (Diag.ImmediateDiag.hasValue())
      *Diag.ImmediateDiag << Value;
    else if (Diag.PartialDiagId.hasValue())
      Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
          << Value;
    return Diag;
  }

  // It is necessary to limit this to rvalue reference to avoid calling this
  // function with a bitfield lvalue argument since non-const reference to
  // bitfield is not allowed.
  template <typename T, typename = typename std::enable_if<
                            !std::is_lvalue_reference<T>::value>::type>
  const SemaDiagnosticBuilder &operator<<(T &&V) const {
    if (ImmediateDiag.hasValue())
      *ImmediateDiag << std::move(V);
    else if (PartialDiagId.hasValue())
      S.DeviceDeferredDiags[Fn][*PartialDiagId].second << std::move(V);
    return *this;
  }

  friend const SemaDiagnosticBuilder &
  operator<<(const SemaDiagnosticBuilder &Diag, const PartialDiagnostic &PD) {
    if (Diag.ImmediateDiag.hasValue())
      PD.Emit(*Diag.ImmediateDiag);
    else if (Diag.PartialDiagId.hasValue())
      Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second = PD;
    return Diag;
  }

  void AddFixItHint(const FixItHint &Hint) const {
    if (ImmediateDiag.hasValue())
      ImmediateDiag->AddFixItHint(Hint);
    else if (PartialDiagId.hasValue())
      S.DeviceDeferredDiags[Fn][*PartialDiagId].second.AddFixItHint(Hint);
  }

  // Conversions to the various error-result types, so that a
  // SemaDiagnosticBuilder can be returned directly as an error result.
  friend ExprResult ExprError(const SemaDiagnosticBuilder &) {
    return ExprError();
  }
  friend StmtResult StmtError(const SemaDiagnosticBuilder &) {
    return StmtError();
  }
  operator ExprResult() const { return ExprError(); }
  operator StmtResult() const { return StmtError(); }
  operator TypeResult() const { return TypeError(); }
  operator DeclResult() const { return DeclResult(true); }
  operator MemInitResult() const { return MemInitResult(true); }

private:
  Sema &S;
  SourceLocation Loc;
  unsigned DiagID;
  FunctionDecl *Fn;
  bool ShowCallStack;

  // Invariant: At most one of these Optionals has a value.
  // FIXME: Switch these to a Variant once that exists.
  llvm::Optional<ImmediateDiagBuilder> ImmediateDiag;
  llvm::Optional<unsigned> PartialDiagId;
};

/// Is the last error level diagnostic immediate. This is used to determine
/// whether the next info diagnostic should be immediate.
bool IsLastErrorImmediate = true;

/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID,
                           bool DeferHint = false);

/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD,
                           bool DeferHint = false);

/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

/// Whether deferrable diagnostics should be deferred.
bool DeferDiags = false;

/// RAII class to control scope of DeferDiags: saves the current value on
/// construction, installs the requested one, and restores the saved value
/// on destruction.
class DeferDiagsRAII {
  Sema &S;
  bool SavedDeferDiags = false;

public:
  DeferDiagsRAII(Sema &S, bool DeferDiags)
      : S(S), SavedDeferDiags(S.DeferDiags) {
    S.DeferDiags = DeferDiags;
  }
  ~DeferDiagsRAII() { S.DeferDiags = SavedDeferDiags; }
};

/// Whether uncompilable error has occurred. This includes errors that happen
/// in deferred diagnostics.
bool hasUncompilableErrorOccurred() const;

bool findMacroSpelling(SourceLocation &loc, StringRef name);

/// Get a string to suggest for zero-initialization of a type.
std::string getFixItZeroInitializerForType(QualType T,
                                           SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;

/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
                                           unsigned Index);

void emitAndClearUnusedLocalTypedefWarnings();

private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
llvm::SmallSetVector<Decl *, 4> DeclsToCheckForDeferredDiags;

public:
// Emit all deferred diagnostics.
void emitDeferredDiags();

enum TUFragmentKind {
  /// The global module fragment, between 'module;' and a module-declaration.
  Global,
  /// A normal translation unit fragment. For a non-module unit, this is the
  /// entire translation unit. Otherwise, it runs from the module-declaration
  /// to the private-module-fragment (if any) or the end of the TU (if not).
  Normal,
  /// The private module fragment, between 'module :private;' and the end of
  /// the translation unit.
  Private
};

void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);

void CheckDelegatingCtorCycles();

Scope *getScopeForContext(DeclContext *Ctx);

void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();

/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);

void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
                             RecordDecl *RD, CapturedRegionKind K,
                             unsigned OpenMPCaptureLevel = 0);

/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
  Sema *Self;

public:
  explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
  void operator()(sema::FunctionScopeInfo *Scope) const;
};

using PoppedFunctionScopePtr =
    std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;

PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
                     const Decl *D = nullptr,
                     QualType BlockType = QualType());

/// Retrieve the innermost function scope, or null if none is active.
sema::FunctionScopeInfo *getCurFunction() const {
  return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}

sema::FunctionScopeInfo *getEnclosingFunction() const;

void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void setFunctionHasMustTail();

void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();

sema::CompoundScopeInfo &getCurCompoundScope() const;

bool hasAnyUnrecoverableErrorsInThisFunction() const;

/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();

/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;

/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if we should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);

/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();

/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();

/// Retrieve the current function, if any, that should be analyzed for
/// potential availability violations.
sema::FunctionScopeInfo *getCurFunctionAvailabilityContext();

/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }

/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
                                             unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D); void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. // QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. 
This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); QualType BuildBitIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Stmt *E); /// Determine whether the callee of a particular function call can throw. /// E, D and Loc are all optional. 
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D, SourceLocation Loc = SourceLocation()); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. 
struct TypeDiagnoser {
  TypeDiagnoser() {}

  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
  virtual ~TypeDiagnoser() {}
};

// Overloads used by BoundTypeDiagnoser::emit to convert common Sema values
// into something a SemaDiagnosticBuilder can stream. Most are identity
// conversions; the Expr/TypeLoc overloads extract a source range.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
  return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}

/// A TypeDiagnoser that binds a diagnostic ID together with extra arguments
/// (held by reference) to be streamed into the diagnostic before the type.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
  unsigned DiagID;
  std::tuple<const Ts &...> Args;

  template <std::size_t... Is>
  void emit(const SemaDiagnosticBuilder &DB,
            std::index_sequence<Is...>) const {
    // Apply all tuple elements to the builder in order.
    bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
    (void)Dummy;
  }

public:
  BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
      : TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
    assert(DiagID != 0 && "no diagnostic for type diagnoser");
  }

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
    emit(DB, std::index_sequence_for<Ts...>());
    DB << T;
  }
};

/// Do a check to make sure \p Name looks like a legal argument for the
/// swift_name attribute applied to decl \p D. Raise a diagnostic if the name
/// is invalid for the given declaration.
///
/// \p AL is used to provide caret diagnostics in case of a malformed name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc,
                       const ParsedAttr &AL, bool IsAsync);

/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
  SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
      : BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
    this->emit(DB, std::index_sequence_for<Ts...>());
    DB << T->isSizelessType() << T;
  }
};

enum class CompleteTypeKind {
  /// Apply the normal rules for complete types. In particular,
  /// treat all sizeless types as incomplete.
  Normal,

  /// Relax the normal rules for complete types so that they include
  /// sizeless built-in types.
  AcceptSizeless,

  // FIXME: Eventually we should flip the default to Normal and opt in
  // to AcceptSizeless rather than opt out of it.
  Default = AcceptSizeless
};

private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking if the address
/// of them is eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);

bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
                             CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);

/// Per-module parsing state pushed/popped as module scopes are entered and
/// left while parsing.
struct ModuleScope {
  SourceLocation BeginLoc;
  clang::Module *Module = nullptr;
  bool ModuleInterface = false;
  bool IsPartition = false;
  bool ImplicitGlobalModuleFragment = false;
  VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// The global module fragment of the current translation unit.
clang::Module *GlobalModuleFragment = nullptr;

/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;

/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
  return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}

/// Helper function to judge if we are in module purview.
/// Return false if we are not in a module.
bool isCurrentModulePurview() const {
  return getCurrentModule() ? getCurrentModule()->isModulePurview() : false;
}

/// Enter the scope of the global module.
Module *PushGlobalModuleFragment(SourceLocation BeginLoc, bool IsImplicit);
/// Leave the scope of the global module.
void PopGlobalModuleFragment();

VisibleModuleSet VisibleModules;

public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
  return Entity->getOwningModule();
}

/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);

bool isModuleVisible(const Module *M, bool ModulePrivate = false);

// When loading a non-modular PCH file, this is used to restore module
// visibility.
void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) {
  VisibleModules.setVisible(Mod, ImportLoc);
}

/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
  // Fast path: unconditionally-visible declarations skip the slow check.
  return D->isUnconditionallyVisible() || isVisibleSlow(D);
}

/// Determine whether any declaration of an entity is visible.
bool hasVisibleDeclaration(const NamedDecl *D,
                           llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
  return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
                               llvm::SmallVectorImpl<Module *> *Modules);

bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);

/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);

/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
                          bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
  NamedDecl *Hidden;
  return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}

/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
                          llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
                                            const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
    SourceLocation Loc, const NamedDecl *D,
    ArrayRef<const NamedDecl *> Equiv);

bool isUsualDeallocationFunction(const CXXMethodDecl *FD);

/// Determine whether \p T is complete, without emitting a diagnostic.
bool isCompleteType(SourceLocation Loc, QualType T,
                    CompleteTypeKind Kind = CompleteTypeKind::Default) {
  return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         CompleteTypeKind Kind, unsigned DiagID);

// Convenience overloads that default the completeness kind.
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         TypeDiagnoser &Diagnoser) {
  return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
  return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}

// Variadic overloads bind extra diagnostic arguments via a
// BoundTypeDiagnoser / SizelessTypeDiagnoser.
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                         const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteType(Loc, T, Diagnoser);
}

template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
                              const Ts &... Args) {
  SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
}

/// Get the type of expression E, triggering instantiation to complete the
/// type if necessary -- that is, if the expression refers to a templated
/// static data member of incomplete array type.
///
/// May still return an incomplete type if instantiation was not possible or
/// if the type is incomplete for a different reason. Use
/// RequireCompleteExprType instead if a diagnostic is expected for an
/// incomplete expression type.
QualType getCompletedType(Expr *E);

void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
                             TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);

template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}

template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
                                  const Ts &... Args) {
  SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
}

bool RequireLiteralType(SourceLocation Loc, QualType T,
                        TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);

template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                        const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireLiteralType(Loc, T, Diagnoser);
}

QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                           const CXXScopeSpec &SS, QualType T,
                           TagDecl *OwnedTagDecl = nullptr);

// Returns the underlying type of a decltype with the given expression.
QualType getDecltypeForExpr(Expr *E);

QualType BuildTypeofExprType(Expr *E);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
                                 UnaryTransformType::UTTKind UKind,
                                 SourceLocation Loc);

//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//

/// Describes whether a body should be skipped and, if so, records the
/// previous and new declarations involved so they can be compared.
struct SkipBodyInfo {
  SkipBodyInfo()
      : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
        New(nullptr) {}
  bool ShouldSkip;
  bool CheckSameAsPrevious;
  NamedDecl *Previous;
  NamedDecl *New;
};

DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);

void DiagnoseUseOfUnimplementedSelectors();

bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;

ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
                       Scope *S, CXXScopeSpec *SS = nullptr,
                       bool isClassName = false, bool HasTrailingDot = false,
                       ParsedType ObjectType = nullptr,
                       bool IsCtorOrDtorName = false,
                       bool WantNontrivialTypeSourceInfo = false,
                       bool IsClassTemplateDeductionContext = true,
                       IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
                             SourceLocation IILoc,
                             Scope *S,
                             CXXScopeSpec *SS,
                             ParsedType &SuggestedType,
                             bool IsTemplateName = false);

/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
                                    SourceLocation NameLoc,
                                    bool IsTemplateTypeArg);

/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
  /// This name is not a type or template in this context, but might be
  /// something else.
  NC_Unknown,
  /// Classification failed; an error has been produced.
  NC_Error,
  /// The name has been typo-corrected to a keyword.
  NC_Keyword,
  /// The name was classified as a type.
  NC_Type,
  /// The name was classified as a specific non-type, non-template
  /// declaration. ActOnNameClassifiedAsNonType should be called to
  /// convert the declaration to an expression.
  NC_NonType,
  /// The name was classified as an ADL-only function name.
  /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
  /// result to an expression.
  NC_UndeclaredNonType,
  /// The name denotes a member of a dependent type that could not be
  /// resolved. ActOnNameClassifiedAsDependentNonType should be called to
  /// convert the result to an expression.
  NC_DependentNonType,
  /// The name was classified as an overload set, and an expression
  /// representing that overload set has been formed.
  /// ActOnNameClassifiedAsOverloadSet should be called to form a suitable
  /// expression referencing the overload set.
  NC_OverloadSet,
  /// The name was classified as a template whose specializations are types.
  NC_TypeTemplate,
  /// The name was classified as a variable template name.
  NC_VarTemplate,
  /// The name was classified as a function template name.
  NC_FunctionTemplate,
  /// The name was classified as an ADL-only function template name.
  NC_UndeclaredTemplate,
  /// The name was classified as a concept name.
  NC_Concept,
};

/// The result of name classification: a kind plus, for kinds that carry a
/// payload, the corresponding expression, declaration, template name, or
/// type.
class NameClassification {
  NameClassificationKind Kind;
  // Invariant: which union member (if any) is active is determined by Kind;
  // it is only set by the factory functions below.
  union {
    ExprResult Expr;
    NamedDecl *NonTypeDecl;
    TemplateName Template;
    ParsedType Type;
  };

  explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

public:
  NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

  NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}

  static NameClassification Error() { return NameClassification(NC_Error); }

  static NameClassification Unknown() {
    return NameClassification(NC_Unknown);
  }

  static NameClassification OverloadSet(ExprResult E) {
    NameClassification Result(NC_OverloadSet);
    Result.Expr = E;
    return Result;
  }

  static NameClassification NonType(NamedDecl *D) {
    NameClassification Result(NC_NonType);
    Result.NonTypeDecl = D;
    return Result;
  }

  static NameClassification UndeclaredNonType() {
    return NameClassification(NC_UndeclaredNonType);
  }

  static NameClassification DependentNonType() {
    return NameClassification(NC_DependentNonType);
  }

  static NameClassification TypeTemplate(TemplateName Name) {
    NameClassification Result(NC_TypeTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification VarTemplate(TemplateName Name) {
    NameClassification Result(NC_VarTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification FunctionTemplate(TemplateName Name) {
    NameClassification Result(NC_FunctionTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification Concept(TemplateName Name) {
    NameClassification Result(NC_Concept);
    Result.Template = Name;
    return Result;
  }

  static NameClassification UndeclaredTemplate(TemplateName Name) {
    NameClassification Result(NC_UndeclaredTemplate);
    Result.Template = Name;
    return Result;
  }

  NameClassificationKind getKind() const { return Kind; }

  ExprResult getExpression() const {
    assert(Kind == NC_OverloadSet);
    return Expr;
  }

  ParsedType getType() const {
    assert(Kind == NC_Type);
    return Type;
  }

  NamedDecl *getNonTypeDecl() const {
    assert(Kind == NC_NonType);
    return NonTypeDecl;
  }

  TemplateName getTemplateName() const {
    assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
           Kind == NC_VarTemplate || Kind == NC_Concept ||
           Kind == NC_UndeclaredTemplate);
    return Template;
  }

  TemplateNameKind getTemplateNameKind() const {
    switch (Kind) {
    case NC_TypeTemplate:
      return TNK_Type_template;
    case NC_FunctionTemplate:
      return TNK_Function_template;
    case NC_VarTemplate:
      return TNK_Var_template;
    case NC_Concept:
      return TNK_Concept_template;
    case NC_UndeclaredTemplate:
      return TNK_Undeclared_template;
    default:
      llvm_unreachable("unsupported name classification.");
    }
  }
};

/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
                                IdentifierInfo *&Name, SourceLocation NameLoc,
                                const Token &NextToken,
                                CorrectionCandidateCallback *CCC = nullptr);

/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
                                                  SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, bool IsAddressOfOperand); /// Act on the result of classifying a name as a specific non-type /// declaration. ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS, NamedDecl *Found, SourceLocation NameLoc, const Token &NextToken); /// Act on the result of classifying a name as an overload set. ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. 
return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); void warnOnReservedIdentifier(const NamedDecl *D); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); bool tryToFixVariablyModifiedVarType(TypeSourceInfo *&TInfo, QualType &T, SourceLocation Loc, unsigned FailedFoldDiagID); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range); bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const BindingDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. 
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); enum class CheckConstexprKind { /// Diagnose issues that are 
non-constant or that are extensions. Diagnose, /// Identify whether this function satisfies the formal rules for constexpr /// functions in the current lanugage mode (with no extensions). CheckValid }; bool CheckConstexprFunctionDefinition(const FunctionDecl *FD, CheckConstexprKind Kind); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); QualType adjustParameterTypeForObjCAutoRefCount(QualType T, SourceLocation NameLoc, TypeSourceInfo *TSInfo); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); ExprResult ConvertParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void SetParamDefaultArgument(ParmVarDecl *Param, Expr 
*DefaultArg, SourceLocation EqualLoc); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. enum NonTrivialCUnionContext { // Function parameter. NTCUC_FunctionParam, // Function return. NTCUC_FunctionReturn, // Default-initialized object. NTCUC_DefaultInitializedObject, // Variable with automatic storage duration. NTCUC_AutoVar, // Initializer expression that might copy from another object. NTCUC_CopyInit, // Assignment. NTCUC_Assignment, // Compound literal. NTCUC_CompoundLiteral, // Block capture. NTCUC_BlockCapture, // lvalue-to-rvalue conversion of volatile type. NTCUC_LValueToRValueVolatile, }; /// Emit diagnostics if the initializer or any of its explicit or /// implicitly-generated subexpressions require copying or /// default-initializing a type that is or contains a C union type that is /// non-trivial to copy or default-initialize. void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc); // These flags are passed to checkNonTrivialCUnion. enum NonTrivialCUnionKind { NTCUK_Init = 0x1, NTCUK_Destruct = 0x2, NTCUK_Copy = 0x4, }; /// Emit diagnostics if a non-trivial C union type or a struct that contains /// a non-trivial C union is used in an invalid context. 
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D); ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr); ExprResult ActOnRequiresClause(ExprResult ConstraintExpr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. 
/// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. 
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// An enumeration to represent the transition of states in parsing module /// fragments and imports. If we are not parsing a C++20 TU, or we find /// an error in state transition, the state is set to NotACXX20Module. enum class ModuleImportState { FirstDecl, ///< Parsing the first decl in a TU. GlobalFragment, ///< after 'module;' but before 'module X;' ImportAllowed, ///< after 'module X;' but before any non-import decl. ImportFinished, ///< after any non-import decl. PrivateFragment, ///< after 'module :private;'. NotACXX20Module ///< Not a C++20 TU, or an invalid state was found. }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, ModuleIdPath Partition, ModuleImportState &ImportState); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. 
/// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module toplevel name as an access path. /// \param Partition The module partition name as an access path. DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path, ModuleIdPath Partition = {}); DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {}); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parsed has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule. void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem. 
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. 
enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, 
InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); /// For a defaulted function, the kind of defaulted function that it is. class DefaultedFunctionKind { CXXSpecialMember SpecialMember : 8; DefaultedComparisonKind Comparison : 8; public: DefaultedFunctionKind() : SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) { } DefaultedFunctionKind(CXXSpecialMember CSM) : SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {} DefaultedFunctionKind(DefaultedComparisonKind Comp) : SpecialMember(CXXInvalid), Comparison(Comp) {} bool isSpecialMember() const { return SpecialMember != CXXInvalid; } bool isComparison() const { return Comparison != DefaultedComparisonKind::None; } explicit operator bool() const { return isSpecialMember() || isComparison(); } CXXSpecialMember asSpecialMember() const { return SpecialMember; } DefaultedComparisonKind asComparison() const { return Comparison; } /// Get the index of this function kind for use in diagnostics. 
unsigned getDiagnosticIndex() const { static_assert(CXXInvalid > CXXDestructor, "invalid should have highest index"); static_assert((unsigned)DefaultedComparisonKind::None == 0, "none should be equal to zero"); return SpecialMember + (unsigned)Comparison; } }; DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) { return getDefaultedFunctionKind(MD).asSpecialMember(); } DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) { return getDefaultedFunctionKind(FD).asComparison(); } void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. 
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, bool IsAbstract, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); /// Set the current declaration context until it gets popped. 
  void PushDeclContext(Scope *S, DeclContext *DC);
  /// Counterpart to PushDeclContext: restore the previous declaration context.
  void PopDeclContext();

  /// EnterDeclaratorContext - Used when we must lookup names in the context
  /// of a declarator's nested name specifier.
  void EnterDeclaratorContext(Scope *S, DeclContext *DC);
  /// Counterpart to EnterDeclaratorContext: leave the declarator's context.
  void ExitDeclaratorContext(Scope *S);

  /// Enter a template parameter scope, after it's been associated with a particular
  /// DeclContext. Causes lookup within the scope to chain through enclosing contexts
  /// in the correct order.
  void EnterTemplatedContext(Scope *S, DeclContext *DC);

  /// Push the parameters of D, which must be a function, into scope.
  void ActOnReenterFunctionContext(Scope* S, Decl* D);
  /// Counterpart to ActOnReenterFunctionContext: pop the function's parameters
  /// back out of scope.
  void ActOnExitFunctionContext();

  /// Return the innermost enclosing declaration context that corresponds to a
  /// function-like entity (NOTE(review): presumably skips transparent contexts
  /// such as blocks/lambdas — confirm against the definition).
  DeclContext *getFunctionLevelDeclContext();

  /// getCurFunctionDecl - If inside of a function body, this returns a pointer
  /// to the function decl for the function being parsed.  If we're currently
  /// in a 'block', this returns the containing context.
  FunctionDecl *getCurFunctionDecl();

  /// getCurMethodDecl - If inside of a method body, this returns a pointer to
  /// the method decl for the method being parsed. If we're currently
  /// in a 'block', this returns the containing context.
  ObjCMethodDecl *getCurMethodDecl();

  /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
  /// or C function we're in, otherwise return null.  If we're currently
  /// in a 'block', this returns the containing context.
  NamedDecl *getCurFunctionOrMethodDecl();

  /// Add this decl to the scope shadowed decl chains.
  void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);

  /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
  /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
  /// true if 'D' belongs to the given declaration context.
  ///
  /// \param AllowInlineNamespace If \c true, allow the declaration to be in the
  /// enclosing namespace set of the context, rather than contained
  /// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, /// Merge availability attributes for an implementation of /// an optional protocol requirement. AMK_OptionalProtocolImplementation }; /// Describes the kind of priority given to an availability attribute. /// /// The sum of priorities deteremines the final priority of the attribute. /// The final priority determines how the attribute will be merged. /// An attribute with a lower priority will always remove higher priority /// attributes for the specified platform when it is being applied. An /// attribute with a higher priority will not be applied if the declaration /// already has an availability attribute with a lower priority for the /// specified platform. The final prirority values are not expected to match /// the values in this enumeration, but instead should be treated as a plain /// integer value. This enumeration just names the priority weights that are /// used to calculate that final vaue. 
enum AvailabilityPriority : int { /// The availability attribute was specified explicitly next to the /// declaration. AP_Explicit = 0, /// The availability attribute was applied using '#pragma clang attribute'. AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return true if a new attribute was added. AvailabilityAttr * mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority); TypeVisibilityAttr * mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, TypeVisibilityAttr::VisibilityType Vis); VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, VisibilityAttr::VisibilityType Vis); UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI, StringRef UuidAsWritten, MSGuidDecl *GuidDecl); DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI); DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI); MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase, MSInheritanceModel Model); ErrorAttr *mergeErrorAttr(Decl *D, const AttributeCommonInfo &CI, StringRef NewUserDiagnostic); FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Format, int FormatIdx, int FirstArg); SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, const AttributeCommonInfo &CI, const IdentifierInfo *Ident); MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI); 
// Further attribute-merging helpers: each merges an attribute seen on a
// prior declaration onto \p D. NOTE(review): the pointer return is
// presumably the attribute that was added to \p D (null when nothing was
// added) -- confirm against the definitions in SemaDeclAttr.cpp.
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA,
                                  StringRef Name);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
                                        const AttributeCommonInfo &CI);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
                                              const InternalLinkageAttr &AL);
WebAssemblyImportNameAttr *mergeImportNameAttr(
    Decl *D, const WebAssemblyImportNameAttr &AL);
WebAssemblyImportModuleAttr *mergeImportModuleAttr(
    Decl *D, const WebAssemblyImportModuleAttr &AL);
EnforceTCBAttr *mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL);
EnforceTCBLeafAttr *mergeEnforceTCBLeafAttr(Decl *D,
                                            const EnforceTCBLeafAttr &AL);
BTFDeclTagAttr *mergeBTFDeclTagAttr(Decl *D, const BTFDeclTagAttr &AL);

/// Merge the attributes of \p Old onto the redeclaration \p New; \p AMK
/// selects how availability attributes are reconciled (see
/// AvailabilityMergeKind).
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
                         AvailabilityMergeKind AMK = AMK_Redeclaration);

/// Merge a new typedef-name declaration with previous declarations of the
/// same name found in \p OldDecls.
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
                          LookupResult &OldDecls);

/// Merge a redeclared function \p New with its previous declaration \p Old.
/// \p MergeTypeWithOld controls whether \p New's type is combined with
/// \p Old's. NOTE(review): bool result likely signals an error -- confirm
/// against the definition.
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
                       bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
                                  Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);

/// Merge a redeclared variable \p New with the previous declarations in
/// \p Previous; the *Types/*ExceptionSpecs variants merge the respective
/// pieces for a known previous declaration.
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);

/// Check a (re)definition \p NewDefn of a variable against an existing
/// definition \p OldDefn.
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);

/// Emit a note pointing at the previous definition of \p Old from the new
/// location \p New.
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);

/// C++-specific counterpart of MergeFunctionDecl.
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);

// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
  AA_Assigning,
  AA_Passing,
  AA_Returning,
  AA_Converting,
  AA_Initializing,
  AA_Sending,
  AA_Casting,
  AA_Passing_CFAudited
};

/// C++ Overloading.
enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true, bool ConsiderRequiresClauses = true); enum class AllowedExplicit { /// Allow no explicit functions to be used. None, /// Allow explicit conversion functions but not explicit constructors. Conversions, /// Allow both explicit conversion functions and explicit constructors. All }; ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, AllowedExplicit AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void 
maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(QualType Param, QualType Arg); bool CanPerformAggregateInitializationForOverloadResolution( const InitializedEntity &Entity, InitListExpr *From); bool IsStringInit(Expr *Init, const ArrayType *AT); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. 
CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_ArrayBound, ///< Array bound in array declarator or new-expression. CCEK_ExplicitBool, ///< Condition in an explicit(bool) specifier. CCEK_Noexcept ///< Condition in a noexcept(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE, NamedDecl *Dest = nullptr); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. 
virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this is a typesafe union. 
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false, OverloadCandidateParamOrder PO = {}); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, OverloadCandidateParamOrder PO = {}); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, 
DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, OverloadCandidateParamOrder PO = {}); bool CheckNonDependentConversions( FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}, OverloadCandidateParamOrder PO = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddNonMemberOperatorCandidates( const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, OverloadCandidateParamOrder PO = {}); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = 
false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate( NamedDecl *Found, FunctionDecl *Fn, OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(), QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. 
/// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfSingleOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); void AddOverloadedCallCandidates( LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet 
&CandidateSet); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass, NestedNameSpecifierLoc NNSLoc, DeclarationNameInfo DNI, const UnresolvedSetImpl &Fns, bool PerformADL = true); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet, OverloadedOperatorKind Op, const UnresolvedSetImpl &Fns, ArrayRef<Expr *> Args, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true, bool AllowRewrittenCandidates = true, FunctionDecl *DefaultedFn = nullptr); ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, FunctionDecl *DefaultedFn); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base, MultiExprArg Args); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool 
IsExecConfig = false, bool AllowRecovery = false); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. 
In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up a name following ~ in a destructor name. This is an ordinary /// lookup, but prefers tags to typedefs. LookupDestructorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. 
This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. 
LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplatePack, }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, SourceLocation TypoLoc); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. 
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an extenal /// source. bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloaded. 
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
                            SourceLocation Loc,
                            LookupNameKind NameKind,
                            RedeclarationKind Redecl
                              = NotForRedeclaration);

/// Look up a builtin function by the name already stored in \p R.
bool LookupBuiltin(LookupResult &R);
void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID);

/// Perform unqualified name lookup starting from scope \p S; results are
/// delivered through \p R.
bool LookupName(LookupResult &R, Scope *S,
                bool AllowBuiltinCreation = false);

/// Perform qualified name lookup into the declaration context
/// \p LookupCtx.
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                         bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                         CXXScopeSpec &SS);

/// Perform lookup of a name that may be qualified by the parsed
/// scope specifier \p SS (falls back to unqualified lookup when \p SS is
/// absent or empty).
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
                      bool AllowBuiltinCreation = false,
                      bool EnteringContext = false);

/// Find the Objective-C protocol named \p II, if any.
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
                                 RedeclarationKind Redecl
                                   = NotForRedeclaration);

/// Perform lookup into the bases of \p Class on behalf of the current
/// lookup. NOTE(review): presumably backs MSVC's __super lookup -- confirm
/// against the definition in SemaLookup.cpp.
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);

/// Collect the visible declarations of the overloaded operator \p Op into
/// \p Functions.
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
                                  UnresolvedSetImpl &Functions);

/// Find the label named \p II in the current function, creating it if it
/// does not yet exist (per the name; \p GnuLabelLoc is set for GNU local
/// labels).
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
                               SourceLocation GnuLabelLoc = SourceLocation());

/// Special-member lookup helpers for C++ classes; the Quals/ThisQuals
/// arguments carry the cv-qualifiers of the argument and implicit object
/// parameter respectively.
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
                                             unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                       bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
                                            unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                      bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);

/// Check a literal-operator-id (operator"" X) for validity.
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id,
                            bool IsUDSuffix);

/// Look up a literal operator matching the argument types \p ArgTys; the
/// Allow* flags restrict which forms (raw, template, string template) may
/// match. Result kinds are described by LiteralOperatorLookupResult.
LiteralOperatorLookupResult
LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys,
                      bool AllowRaw, bool AllowTemplate,
                      bool AllowStringTemplate, bool DiagnoseMissing,
                      StringLiteral *StringLit = nullptr);

bool isKnownName(StringRef name);
/// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs. enum class FunctionEmissionStatus { Emitted, CUDADiscarded, // Discarded due to CUDA/HIP hostness OMPDiscarded, // Discarded due to OpenMP hostness TemplateDiscarded, // Discarded due to uninstantiated templates Unknown, }; FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl, bool Final = false); // Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check. bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. 
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param RecoverUncorrectedTypos If true, when typo correction fails, it /// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr( Expr *E, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr( ExprResult ER, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? 
ER : CorrectDelayedTyposInExpr(ER.get(), InitDecl, RecoverUncorrectedTypos, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); bool CheckRedeclarationExported(NamedDecl *New, NamedDecl *Old); bool CheckRedeclarationInModule(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} /// Attempts to produce a RecoveryExpr after some AST node cannot be created. ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End, ArrayRef<Expr *> SubExprs, QualType T = QualType()); ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID, SourceLocation Loc); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction( FunctionDecl *FD); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. 
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Handles semantic checking for features that are common to all attributes, /// such as checking whether a parameter was properly specified, or the /// correct number of arguments were passed, etc. Returns true if the /// attribute has been diagnosed. bool checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A, bool SkipArgCountCheck = false); bool checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A, bool SkipArgCountCheck = false); /// Map any API notes provided for this declaration to attributes on the /// declaration. /// /// Triggered by declaration-attribute processing. void ProcessAPINotes(Decl *D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. 
bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const AttributeCommonInfo &CI, const Expr *E, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); llvm::Error isValidSectionSpecifier(StringRef Str); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkTargetClonesAttrString(SourceLocation LiteralLoc, StringRef Str, const StringLiteral *Literal, bool &HasDefault, bool &HasCommas, SmallVectorImpl<StringRef> &Strings); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceModel SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. 
const AttributedType *getCallingConvAttributedType(QualType T) const; /// Check whether a nullability type specifier can be added to the given /// type through some means not written in source (e.g. API notes). /// /// \param type The type to which the nullability specifier will be /// added. On success, this type will be updated appropriately. /// /// \param nullability The nullability specifier to add. /// /// \param diagLoc The location to use for diagnostics. /// /// \param allowArrayTypes Whether to accept nullability specifiers on an /// array type (e.g., because it will decay to a pointer). /// /// \param overrideExisting Whether to override an existing, locally-specified /// nullability specifier rather than complaining about the conflict. /// /// \returns true if nullability cannot be applied, false otherwise. bool checkImplicitNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability, SourceLocation diagLoc, bool allowArrayTypes, bool overrideExisting); /// Process the attributes before creating an attributed statement. Returns /// the semantic attributes that have been processed. void ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesWithRange &InAttrs, SmallVectorImpl<const Attr *> &OutAttrs); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implementation match those listed in the interface. 
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which /// backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If method is a property setter/getter and /// its property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns ivar's property on success. 
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjCPropertyDecl for a category or \@interface. ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks if two methods' type match and returns /// true, or false, accordingly. 
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declared in interface /// or protocol against those declared in their implementations. void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// category matches with those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); /// Returns default addr space for method qualifiers. LangAS getDefaultCXXMethodAddrSpace() const; private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool. See description of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance); public: /// - Returns instance or factory methods in global method pool for /// given selector. It checks the desired kind first, if none is found, and /// parameter checkTheOther is set, it then checks the other kind. If no such /// method or only one method is found, function returns false; otherwise, it /// returns true. 
bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool InstanceFirst, bool CheckTheOther, const ObjCObjectType *TypeBound = nullptr); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass, SmallVectorImpl<ObjCMethodDecl*>& Methods); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// - Returns a selector which best matches given argument list or /// nullptr if none could be found ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance, SmallVectorImpl<ObjCMethodDecl*>& Methods); /// Record the typo correction failure and return an empty correction. TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) { if (RecordFailure) TypoCorrectionFailures[Typo].insert(TypoLoc); return TypoCorrection(); } public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool. This allows us to efficiently associate /// a selector with a method declaration for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods. void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/false); } /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global /// pool. void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. 
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false); } const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType()); /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization. void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg() : E(nullptr) { } FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg( ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? 
Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(bool IsStmtExpr); void ActOnAfterCompoundStatementLeadingPragmas(); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr); /// A RAII object to enter scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) { S.ActOnStartOfCompoundStmt(IsStmtExpr); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; }; /// An RAII helper that pops function a function scope on exit. struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult BuildAttributedStmt(SourceLocation AttrsLoc, ArrayRef<const Attr *> Attrs, Stmt *SubStmt); StmtResult ActOnAttributedStmt(const ParsedAttributesWithRange &AttrList, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, 
IfStatementKind StatementKind, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc, ConditionResult Cond, SourceLocation RParenLoc, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); struct NamedReturnInfo { const VarDecl *Candidate; enum Status : uint8_t { None, MoveEligible, MoveEligibleAndCopyElidable }; Status S; bool isMoveEligible() const { return S != None; }; bool isCopyElidable() const { return S == MoveEligibleAndCopyElidable; } }; enum class SimplerImplicitMoveMode { ForceOff, Normal, ForceOn }; NamedReturnInfo getNamedReturnInfo( Expr *&E, SimplerImplicitMoveMode Mode = SimplerImplicitMoveMode::Normal); NamedReturnInfo getNamedReturnInfo(const VarDecl *VD); const VarDecl *getCopyElisionCandidate(NamedReturnInfo &Info, QualType ReturnType); ExprResult 
PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const NamedReturnInfo &NRInfo, Expr *Value, bool SupressSimplerImplicitMoves = false); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, bool AllowRecovery = false); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, NamedReturnInfo &NRInfo, bool SupressSimplerImplicitMoves); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt 
*Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S, unsigned DiagID); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// If VD is set but not otherwise used, diagnose, for a parameter or a /// variable. 
void DiagnoseUnusedButSetDecl(const VarDecl *VD); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { ParsingClassDepth++; return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { ParsingClassDepth--; DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD void DiagnoseUnguardedAvailabilityViolations(Decl *FD); void 
handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); TypeSourceInfo *TransformToPotentiallyEvaluated(TypeSourceInfo *TInfo); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult CheckUnevaluatedOperand(Expr *E); void CheckUnusedVolatileAssignment(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). 
There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. 
/// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false, ArrayRef<const Expr *> StopAt = None); /// Try to recover by turning the given expression into a /// call. 
Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Try to convert an expression \p E to type \p Ty. Returns the result of the /// conversion. ExprResult tryConvertExprToType(Expr *E, QualType Ty); /// Conditionally issue a diagnostic based on the statements's reachability /// analysis. /// /// \param Stmts If Stmts is non-empty, delay reporting the diagnostic until /// the function body is parsed, and then do a basic reachability analysis to /// determine if the statement is reachable. If it is unreachable, the /// diagnostic will not be emitted. bool DiagIfReachable(SourceLocation Loc, ArrayRef<const Stmt *> Stmts, const PartialDiagnostic &PD); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. 
SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseDependentMemberLookup(LookupResult &R); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S, IdentifierInfo *II); ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. 
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr( const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, UnresolvedLookupExpr *AsULE = nullptr); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool 
AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); ExprResult BuildSYCLUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation LParen, SourceLocation RParen, TypeSourceInfo *TSI); ExprResult ActOnSYCLUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation LParen, SourceLocation RParen, ParsedType ParsedTy); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. 
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, MultiExprArg ArgExprs, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx, Expr *ColumnIdx, SourceLocation RBLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLocFirst, SourceLocation ColonLocSecond, Expr *Length, Expr *Stride, SourceLocation RBLoc); ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc, SourceLocation RParenLoc, ArrayRef<Expr *> Dims, ArrayRef<SourceRange> 
Brackets); /// Data structure for iterator expression. struct OMPIteratorData { IdentifierInfo *DeclIdent = nullptr; SourceLocation DeclIdentLoc; ParsedType Type; OMPIteratorExpr::IteratorRange Range; SourceLocation AssignLoc; SourceLocation ColonLoc; SourceLocation SecColonLoc; }; ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc, SourceLocation LLoc, SourceLocation RLoc, ArrayRef<OMPIteratorData> Data); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, 
SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr); ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false, bool AllowRecovery = false); Expr *BuildBuiltinCallExpr(SourceLocation Loc, Builtin::ID Id, MultiExprArg CallArgs); enum class AtomicArgumentOrder { API, AST }; ExprResult BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, SourceLocation RParenLoc, MultiExprArg Args, AtomicExpr::AtomicOp Op, AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an altivec or OpenCL literal. 
ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation EqualOrColonLoc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, UnresolvedSetImpl &Functions); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of a the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". 
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc, unsigned TemplateDepth); // Handle the final expression in a statement expression. ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. 
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. 
^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); ExprResult BuildAsTypeExpr(Expr *E, QualType DestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); NamespaceDecl *lookupStdExperimentalNamespace(); NamespaceDecl *getCachedCoroNamespace() { return CoroTraitsNamespaceCache; } CXXRecordDecl *getStdBadAlloc() const; EnumDecl *getStdAlignValT() const; private: // A cache representing if we've fully checked the various comparison category // types stored in ASTContext. The bit-index corresponds to the integer value // of a ComparisonCategoryType enumerator. llvm::SmallBitVector FullyCheckedComparisonCategories; ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl, CXXScopeSpec &SS, ParsedType TemplateTypeTy, IdentifierInfo *MemberOrBase); public: enum class ComparisonCategoryUsage { /// The '<=>' operator was used in an expression and a builtin operator /// was selected. OperatorInExpression, /// A defaulted 'operator<=>' needed the comparison category. 
This /// typically only applies to 'std::strong_ordering', due to the implicit /// fallback return value. DefaultedOperator, }; /// Lookup the specified comparison category types in the standard /// library, an check the VarDecls possibly returned by the operator<=> /// builtins for that type. /// /// \return The type of the comparison category type corresponding to the /// specified Kind, or a null type if an error occurs QualType CheckComparisonCategoryType(ComparisonCategoryType Kind, SourceLocation Loc, ComparisonCategoryUsage Usage); /// Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. 
bool isInitListConstructor(const FunctionDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, const ParsedAttributesView &AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void FilterUsingLookup(Scope *S, LookupResult &lookup); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(BaseUsingDecl *BUD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, BaseUsingDecl *BUD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc, const LookupResult *R = nullptr, const UsingDecl *UD = nullptr); NamedDecl *BuildUsingDeclaration( Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList, bool IsInstantiation, bool IsUsingIfExists); NamedDecl *BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation EnumLoc, SourceLocation NameLoc, EnumDecl *ED); NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom, ArrayRef<NamedDecl *> Expansions); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); /// Given a derived-class using shadow declaration for a constructor and the /// correspnding base class constructor, find or create 
the implicit /// synthesized derived class constructor to use for this initialization. CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList); Decl *ActOnUsingEnumDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation EnumLoc, const DeclSpec &); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor. ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? 
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. 
ExceptionSpecificationType getExceptionSpecType() const { assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// Integrate an invoked expression into the collected data. void CalledExpr(Expr *E) { CalledStmt(E); } /// Integrate an invoked statement into the collected data. void CalledStmt(Stmt *S); /// Overwrite an EPI's exception specification with this /// computed exception specification. FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_NoexceptFalse; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. 
void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Produce notes explaining why a defaulted function was defined as deleted. void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. 
/// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. 
/// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// Whether this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. 
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. ExprResult MaybeBindToTemporary(Expr *E); /// Wrap the expression in a ConstantExpr if it is a potential immediate /// invocation. ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, QualType DeclInitType, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr *> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); // Checks that the vector type should be initialized from a scalar // by splatting the value rather than populating a single element. // This is the case for AltiVecVector types as well as with // AltiVecPixel and AltiVecBool when -faltivec-src-compat=xl is specified. bool ShouldSplatAltivecScalarInCast(const VectorType *VecTy); // Checks if the -faltivec-src-compat=gcc option is specified. // If so, AltiVecVector, AltiVecBool and AltiVecPixel types are // treated the same way as they are when trying to initialize // these vectors on gcc (an error is emitted). 
bool CheckAltivecInitFromScalar(SourceRange R, QualType VecTy, QualType SrcTy); /// ActOnCXXNamedCast - Parse /// {dynamic,static,reinterpret,const,addrspace}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). 
ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS,
                            tok::TokenKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee,
                            SourceLocation LParenLoc, Expr *LHS,
                            BinaryOperatorKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc,
                            Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
                                 BinaryOperatorKind Operator);

/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);

/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);

/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();

/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;

/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
  Sema &S;
  QualType OldCXXThisTypeOverride;
  bool Enabled;

public:
  /// Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class), along with the given qualifiers placed on '*this'.
  CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
                   bool Enabled = true);

  ~CXXThisScopeRAII();
};

/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
/// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \return returns 'true' if failed, 'false' if success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. 
/// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. 
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? 
Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); // Complete an enum decl, maybe without a scope spec. bool RequireCompleteEnumDecl(EnumDecl *D, SourceLocation L, CXXScopeSpec *SS = nullptr); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. 
IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. 
/// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. 
/// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. 
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Expr *TrailingRequiresClause); /// Number lambda for linkage purposes if necessary. void handleLambdaNumbering( CXXRecordDecl *Class, CXXMethodDecl *Method, Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. 
ParsedType actOnLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
  return ParsedType::make(buildLambdaInitCaptureInitialization(
      Loc, ByRef, EllipsisLoc, None, Id,
      InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
    Expr *&Init);

/// Create a dummy variable within the DeclContext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
                                        QualType InitCaptureType,
                                        SourceLocation EllipsisLoc,
                                        IdentifierInfo *Id,
                                        unsigned InitStyle, Expr *Init);

/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);

/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);

/// This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++20.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
                                              ArrayRef<NamedDecl *> TParams,
                                              SourceLocation RAngleLoc,
                                              ExprResult RequiresClause);

/// Introduce the lambda parameters into scope.
void addLambdaParameters(
    ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
    CXXMethodDecl *CallOperator, Scope *CurScope);

/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType, CallingConv CC); /// Define the "body" of the conversion from a lambda object to a /// function pointer. 
/// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); /// Check whether the given expression is a valid constraint expression. /// A diagnostic is emitted if it is not, false is returned, and /// PossibleNonPrimary will be set to true if the failure might be due to a /// non-primary expression being used as an atomic constraint. bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(), bool *PossibleNonPrimary = nullptr, bool IsTrailingRequiresClause = false); private: /// Caches pairs of template-like decls whose associated constraints were /// checked for subsumption and whether or not the first's constraints did in /// fact subsume the second's. llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache; /// Caches the normalized associated constraints of declarations (concepts or /// constrained declarations). If an error occurred while normalizing the /// associated constraints of the template or concept, nullptr will be cached /// here. 
llvm::DenseMap<NamedDecl *, NormalizedConstraint *> NormalizationCache; llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &> SatisfactionCache; public: const NormalizedConstraint * getNormalizedAssociatedConstraints( NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints); /// \brief Check whether the given declaration's associated constraints are /// at least as constrained than another declaration's according to the /// partial ordering of constraints. /// /// \param Result If no error occurred, receives the result of true if D1 is /// at least constrained than D2, and false otherwise. /// /// \returns true if an error occurred, false otherwise. bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2, bool &Result); /// If D1 was not at least as constrained as D2, but would've been if a pair /// of atomic constraints involved had been declared in a concept and not /// repeated in two separate places in code. /// \returns true if such a diagnostic was emitted, false otherwise. bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2); /// \brief Check whether the given list of constraint expressions are /// satisfied (as if in a 'conjunction') given template arguments. /// \param Template the template-like entity that triggered the constraints /// check (either a concept or a constrained entity). /// \param ConstraintExprs a list of constraint expressions, treated as if /// they were 'AND'ed together. /// \param TemplateArgs the list of template arguments to substitute into the /// constraint expression. /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// \param Satisfaction if true is returned, will contain details of the /// satisfaction, with enough information to diagnose an unsatisfied /// expression. 
/// \returns true if an error occurred and satisfaction could not be checked, /// false otherwise. bool CheckConstraintSatisfaction( const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction); /// \brief Check whether the given non-dependent constraint expression is /// satisfied. Returns false and updates Satisfaction with the satisfaction /// verdict if successful, emits a diagnostic and returns true if an error /// occurred and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckConstraintSatisfaction(const Expr *ConstraintExpr, ConstraintSatisfaction &Satisfaction); /// Check whether the given function decl's trailing requires clause is /// satisfied, if any. Returns false and updates Satisfaction with the /// satisfaction verdict if successful, emits a diagnostic and returns true if /// an error occurred and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckFunctionConstraints(const FunctionDecl *FD, ConstraintSatisfaction &Satisfaction, SourceLocation UsageLoc = SourceLocation()); /// \brief Ensure that the given template arguments satisfy the constraints /// associated with the given template, emitting a diagnostic if they do not. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateArgs The converted, canonicalized template arguments. /// /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// /// \returns true if the constrains are not satisfied or could not be checked /// for satisfaction, false if the constraints are satisfied. 
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. /// \param First whether this is the first time an unsatisfied constraint is /// diagnosed for this error. void DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction, bool First = true); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. void DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction, bool First = true); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. 
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const 
ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. 
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// Mark destructors of virtual bases of this class referenced. In the Itanium /// C++ ABI, this is done when emitting a destructor for any non-abstract /// class. In the Microsoft C++ ABI, this is done any time a class's /// destructor is referenced. void MarkVirtualBaseDestructorsReferenced( SourceLocation Location, CXXRecordDecl *ClassDecl, llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr); /// Do semantic checks to allow the complete destructor variant to be emitted /// when the destructor is defined in another translation unit. In the Itanium /// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they /// can be emitted in separate TUs. To emit the complete variant, run a subset /// of the checks performed when emitting a regular destructor. void CheckCompleteDestructorVariant(SourceLocation CurrentLocation, CXXDestructorDecl *Dtor); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. 
  void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
                                             const CXXRecordDecl *RD);

  /// MarkVirtualMembersReferenced - Will mark all members of the given
  /// CXXRecordDecl referenced.
  void MarkVirtualMembersReferenced(SourceLocation Loc,
                                    const CXXRecordDecl *RD,
                                    bool ConstexprOnly = false);

  /// Define all of the vtables that have been used in this
  /// translation unit and reference any virtual members used by those
  /// vtables.
  ///
  /// \returns true if any work was done, false otherwise.
  bool DefineUsedVTables();

  void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);

  void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc,
                            ArrayRef<CXXCtorInitializer*> MemInits,
                            bool AnyErrors);

  /// Check class-level dllimport/dllexport attribute. The caller must
  /// ensure that referenceDLLExportedClassMethods is called at some point
  /// later when all outer classes of Class are complete.
  void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
  void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);

  void referenceDLLExportedClassMethods();

  void propagateDLLAttrToBaseClassTemplate(
      CXXRecordDecl *Class, Attr *ClassAttr,
      ClassTemplateSpecializationDecl *BaseTemplateSpec,
      SourceLocation BaseLoc);

  /// Add gsl::Pointer attribute to std::container::iterator
  /// \param ND The declaration that introduces the name
  /// std::container::iterator. \param UnderlyingRecord The record named by ND.
  void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);

  /// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
  void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);

  /// Add [[gsl::Pointer]] attributes for std:: types.
  void inferGslPointerAttribute(TypedefNameDecl *TD);

  void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);

  /// Check that the C++ class annotated with "trivial_abi" satisfies all the
  /// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Decl *Template, llvm::function_ref<Scope *()> EnterScope); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl 
*ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD); bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM); void CheckDelayedMemberExceptionSpecs(); bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD, DefaultedComparisonKind DCK); void DeclareImplicitEqualityComparison(CXXRecordDecl *RD, FunctionDecl *Spaceship); void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD, DefaultedComparisonKind DCK); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbiguousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, const SourceRange &, DeclAccessPair FoundDecl); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, ArrayRef<Expr *> ArgExprs, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult 
  CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base,
                       QualType Derived, const CXXBasePath &Path,
                       unsigned DiagID, bool ForceCheck = false,
                       bool ForceUnprivileged = false);
  void CheckLookupAccess(const LookupResult &R);
  bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
                          QualType BaseType);
  bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
                                     DeclAccessPair Found, QualType ObjectType,
                                     SourceLocation Loc,
                                     const PartialDiagnostic &Diag);
  /// Convenience overload of the above: performs the same accessibility check
  /// but forwards an invalid use location (SourceLocation()) and a
  /// default-constructed diagnostic (PDiag()).
  bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
                                     DeclAccessPair Found,
                                     QualType ObjectType) {
    return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
                                         SourceLocation(), PDiag());
  }
  void HandleDependentAccessCheck(
      const DependentDiagnostic &DD,
      const MultiLevelTemplateArgumentList &TemplateArgs);
  void PerformDependentDiagnostics(
      const DeclContext *Pattern,
      const MultiLevelTemplateArgumentList &TemplateArgs);
  void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);

  /// When true, access checking violations are treated as SFINAE
  /// failures rather than hard errors.
  bool AccessCheckingSFINAE;

  enum AbstractDiagSelID {
    AbstractNone = -1,
    AbstractReturnType,
    AbstractParamType,
    AbstractVariableType,
    AbstractFieldType,
    AbstractIvarType,
    AbstractSynthesizedIvarType,
    AbstractArrayType
  };

  bool isAbstractType(SourceLocation Loc, QualType T);
  bool RequireNonAbstractType(SourceLocation Loc, QualType T,
                              TypeDiagnoser &Diagnoser);
  template <typename...
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. static NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum TemplateNameIsRequiredTag { TemplateNameIsRequired }; /// Whether and why a template name is required in this lookup. class RequiredTemplateKind { public: /// Template name is required if TemplateKWLoc is valid. RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation()) : TemplateKW(TemplateKWLoc) {} /// Template name is unconditionally required. 
RequiredTemplateKind(TemplateNameIsRequiredTag) {} SourceLocation getTemplateKeywordLoc() const { return TemplateKW.getValueOr(SourceLocation()); } bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); } bool isRequired() const { return TemplateKW != SourceLocation(); } explicit operator bool() const { return isRequired(); } private: llvm::Optional<SourceLocation> TemplateKW; }; enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName( LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, RequiredTemplateKind RequiredTemplate = SourceLocation(), AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization, bool Disambiguation = false); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg, bool HasTypeConstraint); bool ActOnTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool BuildTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc, bool AllowUnexpandedPack); bool AttachTypeConstraint(NestedNameSpecifierLoc NS, DeclarationNameInfo NameInfo, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(AutoTypeLoc TL, NonTypeTemplateParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool RequireStructuralType(QualType T, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); 
NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid, bool SuppressDiagnostic = false); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); /// Get a template argument mapping the given template parameter to itself, /// e.g. 
for X in \c template<int X>, this would return an expression template /// argument referencing X. TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param, SourceLocation Location); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); /// Get the specialization of the given variable template corresponding to /// the specified argument list, or a null-but-valid result if the arguments /// are dependent. DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); /// Form a reference to the specialization of the given variable template /// corresponding to the specified argument list, or a null-but-valid result /// if the arguments are dependent. 
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &ConceptNameInfo, NamedDecl *FoundDecl, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, CXXScopeSpec &SS, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation 
PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. 
    CTAK_DeducedFromArrayBound
  };

  bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg,
                             NamedDecl *Template, SourceLocation TemplateLoc,
                             SourceLocation RAngleLoc,
                             unsigned ArgumentPackIndex,
                             SmallVectorImpl<TemplateArgument> &Converted,
                             CheckTemplateArgumentKind CTAK = CTAK_Specified);

  /// Check that the given template arguments can be provided to
  /// the given template, converting the arguments along the way.
  ///
  /// \param Template The template to which the template arguments are being
  /// provided.
  ///
  /// \param TemplateLoc The location of the template name in the source.
  ///
  /// \param TemplateArgs The list of template arguments. If the template is
  /// a template template parameter, this function may extend the set of
  /// template arguments to also include substituted, defaulted template
  /// arguments.
  ///
  /// \param PartialTemplateArgs True if the list of template arguments is
  /// intentionally partial, e.g., because we're checking just the initial
  /// set of template arguments.
  ///
  /// \param Converted Will receive the converted, canonicalized template
  /// arguments.
  ///
  /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
  /// contain the converted forms of the template arguments as written.
  /// Otherwise, \p TemplateArgs will not be modified.
  ///
  /// \param ConstraintsNotSatisfied If provided, and an error occurred, will
  /// receive true if the cause for the error is the associated constraints of
  /// the template not being satisfied by the template arguments.
  ///
  /// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true, bool *ConstraintsNotSatisfied = nullptr); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param, TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. 
/// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, TypeSourceInfo **TSI, bool DeducedTSTContext); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, bool DeducedTSTContext = true); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Concepts //===--------------------------------------------------------------------===// Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); RequiresExprBodyDecl * ActOnStartRequiresExpr(SourceLocation RequiresKWLoc, ArrayRef<ParmVarDecl *> LocalParameters, Scope *BodyScope); void ActOnFinishRequiresExpr(); concepts::Requirement *ActOnSimpleRequirement(Expr *E); concepts::Requirement *ActOnTypeRequirement( SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo *TypeName, TemplateIdAnnotation 
*TemplateId); concepts::Requirement *ActOnCompoundRequirement(Expr *E, SourceLocation NoexceptLoc); concepts::Requirement * ActOnCompoundRequirement( Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, unsigned Depth); concepts::Requirement *ActOnNestedRequirement(Expr *Constraint); concepts::ExprRequirement * BuildExprRequirement( Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::ExprRequirement * BuildExprRequirement( concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type); concepts::TypeRequirement * BuildTypeRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); concepts::NestedRequirement *BuildNestedRequirement(Expr *E); concepts::NestedRequirement * BuildNestedRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc, RequiresExprBodyDecl *Body, ArrayRef<ParmVarDecl *> LocalParameters, ArrayRef<concepts::Requirement *> Requirements, SourceLocation ClosingBraceLoc); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. 
UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression. UPPC_Block, /// A type constraint. UPPC_TypeConstraint, // A requirement in a requires-expression. UPPC_Requirement, // A requires-clause. UPPC_RequiresClause, }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostc should be emitted. /// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. 
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given requirees-expression contains an unexpanded reference to one /// of its own parameter packs, diagnose the error. /// /// \param RE The requiress-expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. 
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. 
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. 
/// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. 
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. 
/// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. 
TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. TDK_DeducedMismatch, /// After substituting deduced template arguments, an element of /// a dependent parameter type did not match the corresponding element /// of the corresponding argument (when deducing from an initializer list). TDK_DeducedMismatchNested, /// A non-depnedent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// Checking non-dependent argument conversions failed. TDK_NonDependentConversionFailure, /// The deduced arguments did not satisfy the constraints associated /// with the template. TDK_ConstraintsNotSatisfied, /// Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure, /// CUDA Target attributes do not match. 
TDK_CUDATargetMismatch }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// brief A function argument from which we performed template argument // deduction for a call. struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, 
QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); // Substitute auto in TypeWithAuto for a Dependent auto type QualType SubstAutoTypeDependent(QualType TypeWithAuto); // Substitute auto in TypeWithAuto for a Dependent auto type TypeSourceInfo * SubstAutoTypeSourceInfoDependent(TypeSourceInfo *TypeWithAuto); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. 
enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate( FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2, bool Reversed = false); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, 
SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. 
The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are instantiating a requirement of a requires expression. RequirementInstantiation, /// We are checking the satisfaction of a nested requirement of a requires /// expression. NestedRequirementConstraintsCheck, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are declaring an implicit 'operator==' for a defaulted /// 'operator<=>'. 
DeclaringImplicitEqualityComparison, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, // We are checking the constraints associated with a constrained entity or // the constraint expression of a concept. This includes the checks that // atomic constraints have the type 'bool' and that they can be constant // evaluated. ConstraintsCheck, // We are substituting template arguments into a constraint expression. ConstraintSubstitution, // We are normalizing a constraint expression. ConstraintNormalization, // We are substituting into the parameter mapping of an atomic constraint // during normalization. ParameterMappingSubstitution, /// We are rewriting a comparison operator in terms of an operator<=>. RewritingOperatorAsSpaceship, /// We are initializing a structured binding. InitializingStructuredBinding, /// We are marking a class as __dllexport. MarkingClassDllexported, /// Added for Template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. 
unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that cause /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// List of active code synthesis contexts. /// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. 
llvm::DenseSet<Module*> LookupModulesCache;

/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating
/// a template defined within it.
llvm::DenseSet<Module*> &getLookupModules();

/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;

/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;

/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;

/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;

/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
    TemplateInstCallbacks;

/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;

/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
  Sema &Self;              // Sema whose index is temporarily overridden.
  int OldSubstitutionIndex; // Value restored on destruction.

public:
  ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
      : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
    Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
  }

  ~ArgumentPackSubstitutionIndexRAII() {
    Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
  }
};

friend class ArgumentPackSubstitutionRAII;

/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
  SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;

/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and the object evaluates true via \c isInvalid().
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
  /// Note that we are instantiating a class template,
  /// function template, variable template, alias template,
  /// or a member thereof.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        Decl *Entity,
                        SourceRange InstantiationRange = SourceRange());

  // Tag type used to disambiguate the exception-specification overload.
  struct ExceptionSpecification {};
  /// Note that we are instantiating an exception specification
  /// of a function template.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionDecl *Entity, ExceptionSpecification,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument in a
  /// template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateParameter Param, TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting either explicitly-specified or
  /// deduced template arguments during function template argument deduction.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionTemplateDecl *FunctionTemplate,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        CodeSynthesisContext::SynthesisKind Kind,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template declaration.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ClassTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a variable template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        VarTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument for a function
  /// parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParmVarDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting prior template arguments into a
  /// non-type parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template, NonTypeTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are substituting prior template arguments into a
  /// template template parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template, TemplateTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are checking the default template argument
  /// against the template parameter for a given template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template, NamedDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  // Tag type used to disambiguate the constraints-check overloads.
  struct ConstraintsCheck {};
  /// \brief Note that we are checking the constraints associated with some
  /// constrained entity (a concept declaration or a template with associated
  /// constraints).
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintsCheck, NamedDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  // Tag type used to disambiguate the constraint-substitution overload.
  struct ConstraintSubstitution {};
  /// \brief Note that we are checking a constraint expression associated
  /// with a template declaration or as part of the satisfaction check of a
  /// concept.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintSubstitution, NamedDecl *Template,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange);

  // Tag type used to disambiguate the constraint-normalization overload.
  struct ConstraintNormalization {};
  /// \brief Note that we are normalizing a constraint expression.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintNormalization, NamedDecl *Template,
                        SourceRange InstantiationRange);

  // Tag type used to disambiguate the parameter-mapping overload.
  struct ParameterMappingSubstitution {};
  /// \brief Note that we are substituting into the parameter mapping of an
  /// atomic constraint during constraint normalization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParameterMappingSubstitution, NamedDecl *Template,
                        SourceRange InstantiationRange);

  /// \brief Note that we are substituting template arguments into a part of
  /// a requirement of a requires expression.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        concepts::Requirement *Req,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are checking the satisfaction of the constraint
  /// expression inside of a nested requirement.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        concepts::NestedRequirement *Req, ConstraintsCheck,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we have finished instantiating this template.
  /// Pops this entry off the synthesis-context stack; also called by the
  /// destructor, after which Clear() is idempotent.
  void Clear();

  ~InstantiatingTemplate() { Clear(); }

  /// Determines whether we have exceeded the maximum
  /// recursive template instantiations.
  bool isInvalid() const { return Invalid; }

  /// Determine whether we are already instantiating this
  /// specialization in some surrounding active instantiation.
  bool isAlreadyInstantiating() const { return AlreadyInstantiating; }

private:
  Sema &SemaRef;
  bool Invalid;              // True if the instantiation-depth limit was hit.
  bool AlreadyInstantiating; // True if this specialization is already active.
  bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                               SourceRange InstantiationRange);

  // Common implementation shared by the public constructors above.
  InstantiatingTemplate(
      Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
      SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
      Decl *Entity, NamedDecl *Template = nullptr,
      ArrayRef<TemplateArgument> TemplateArgs = None,
      sema::TemplateDeductionInfo *DeductionInfo = nullptr);

  // Non-copyable: each object owns exactly one stack entry.
  InstantiatingTemplate(const InstantiatingTemplate&) = delete;
  InstantiatingTemplate&
  operator=(const InstantiatingTemplate&) = delete;
};

void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();

/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } bool isImmediateFunctionContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isImmediateFunctionContext(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. 
class SFINAETrap {
  Sema &SemaRef;
  // Snapshot of the Sema state taken at construction; restored in the
  // destructor so errors trapped inside the scope leave no trace.
  unsigned PrevSFINAEErrors;
  bool PrevInNonInstantiationSFINAEContext;
  bool PrevAccessCheckingSFINAE;
  bool PrevLastDiagnosticIgnored;

public:
  explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
      : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
        PrevInNonInstantiationSFINAEContext(
            SemaRef.InNonInstantiationSFINAEContext),
        PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
        PrevLastDiagnosticIgnored(
            SemaRef.getDiagnostics().isLastDiagnosticIgnored())
  {
    // If we are not already inside a SFINAE context, mark this scope as a
    // non-instantiation SFINAE context for the duration of the trap.
    if (!SemaRef.isSFINAEContext())
      SemaRef.InNonInstantiationSFINAEContext = true;
    SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
  }

  ~SFINAETrap() {
    // Restore every saved field, discarding any SFINAE errors counted
    // while the trap was active.
    SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
    SemaRef.InNonInstantiationSFINAEContext
      = PrevInNonInstantiationSFINAEContext;
    SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
    SemaRef.getDiagnostics().setLastDiagnosticIgnored(
        PrevLastDiagnosticIgnored);
  }

  /// Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
  }
};

/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
  Sema &SemaRef;
  // FIXME: Using a SFINAETrap for this is a hack.
  SFINAETrap Trap;
  bool PrevDisableTypoCorrection; // Restored on scope exit.

public:
  explicit TentativeAnalysisScope(Sema &SemaRef)
      : SemaRef(SemaRef), Trap(SemaRef, true),
        PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
    SemaRef.DisableTypoCorrection = true;
  }
  ~TentativeAnalysisScope() {
    SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
  }
};

/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;

/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation), /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. 
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;

/// RAII scope that, when \p Enabled, swaps out the global queues of pending
/// instantiations and vtable uses at construction, lets \c perform() flush
/// them, and restores (or merges back) the saved queues on destruction.
class GlobalEagerInstantiationScope {
public:
  GlobalEagerInstantiationScope(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    if (!Enabled) return;

    // Capture the current pending work so this scope starts empty.
    SavedPendingInstantiations.swap(S.PendingInstantiations);
    SavedVTableUses.swap(S.VTableUses);
  }

  /// Flush the vtables and instantiations queued within this scope.
  void perform() {
    if (Enabled) {
      S.DefineUsedVTables();
      S.PerformPendingInstantiations();
    }
  }

  ~GlobalEagerInstantiationScope() {
    if (!Enabled) return;

    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);

    // Restore the set of pending implicit instantiations.
    if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) {
      assert(S.PendingInstantiations.empty() &&
             "PendingInstantiations should be empty before it is discarded.");
      S.PendingInstantiations.swap(SavedPendingInstantiations);
    } else {
      // Template instantiations in the PCH may be delayed until the TU.
      S.PendingInstantiations.swap(SavedPendingInstantiations);
      S.PendingInstantiations.insert(S.PendingInstantiations.end(),
                                     SavedPendingInstantiations.begin(),
                                     SavedPendingInstantiations.end());
    }
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled; // When false, the scope is a no-op.
};

/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

/// RAII scope that swaps out the queue of local pending instantiations at
/// construction, lets \c perform() flush it, and swaps the saved queue back
/// on destruction.
class LocalEagerInstantiationScope {
public:
  LocalEagerInstantiationScope(Sema &S) : S(S) {
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

  /// Flush the local instantiations queued within this scope.
  void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

  ~LocalEagerInstantiationScope() {
    assert(S.PendingLocalImplicitInstantiations.empty() &&
           "there shouldn't be any pending local implicit instantiations");
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

private:
  Sema &S;
  std::deque<PendingImplicitInstantiation>
      SavedPendingLocalImplicitInstantiations;
};

/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
  bool HasInteresting = false; // True once any non-default info was set.

public:
  /// Set the ExtParameterInfo for the parameter at the given index.
  ///
  /// Indexes must be set in increasing order; entries skipped over are
  /// filled with default-constructed ExtParameterInfo.
  void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
    assert(Infos.size() <= index);
    Infos.resize(index);
    Infos.push_back(info);

    if (!HasInteresting)
      HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
  }

  /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
  /// ExtParameterInfo array we've built up, or null if every entry is the
  /// default value.
  const FunctionProtoType::ExtParameterInfo *
  getPointerOrNull(unsigned numParams) {
    if (!HasInteresting) return nullptr;
    Infos.resize(numParams);
    return Infos.data();
  }
};

void PerformPendingInstantiations(bool LocalOnly = false);

TypeSourceInfo *SubstType(TypeSourceInfo *T,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity,
                          bool AllowDeducedTST = false);

QualType SubstType(QualType T,
                   const MultiLevelTemplateArgumentList &TemplateArgs,
                   SourceLocation Loc, DeclarationName Entity);

TypeSourceInfo *SubstType(TypeLoc TL,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity);

TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc,
                          DeclarationName Entity,
                          CXXRecordDecl *ThisContext,
                          Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
                        const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
                        FunctionProtoType::ExceptionSpecInfo &ESI,
                        SmallVectorImpl<QualType> &ExceptionStorage,
                        const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
                              const MultiLevelTemplateArgumentList &TemplateArgs,
                              int indexAdjustment,
                              Optional<unsigned> NumExpansions,
                              bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
                    const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
                    const MultiLevelTemplateArgumentList &TemplateArgs,
                    SmallVectorImpl<QualType> &ParamTypes,
                    SmallVectorImpl<ParmVarDecl *> *OutParams,
                    ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
                     const MultiLevelTemplateArgumentList &TemplateArgs);

/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
/// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateArgumentListInfo &Outputs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the name and return type of a defaulted 'operator<=>' to form /// an implicit 'operator=='. 
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD, FunctionDecl *Spaceship); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void 
InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTypeConstraint(TemplateTypeParmDecl *Inst, const TypeConstraint *TC, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); bool CheckInstantiatedFunctionTemplateConstraints( SourceLocation PointOfInstantiation, FunctionDecl *Decl, ArrayRef<TemplateArgument> TemplateArgs, ConstraintSatisfaction &Satisfaction); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); 
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. 
enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const 
*ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. 
void actOnObjCTypeArgsOrProtocolQualifiers(
    Scope *S, ParsedType baseType, SourceLocation lAngleLoc,
    ArrayRef<IdentifierInfo *> identifiers,
    ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc,
    SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs,
    SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc,
    SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc,
    bool warnOnIncompleteProtocols);

/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
    SourceLocation lAngleLoc, ArrayRef<Decl *> protocols,
    ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc);

/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
    Scope *S, SourceLocation Loc, ParsedType BaseType,
    SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs,
    SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc,
    ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs,
    SourceLocation ProtocolRAngleLoc);

/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
                                SourceLocation ProtocolLAngleLoc,
                                ArrayRef<ObjCProtocolDecl *> Protocols,
                                ArrayRef<SourceLocation> ProtocolLocs,
                                SourceLocation ProtocolRAngleLoc,
                                bool FailOnError = false);

/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc,
                             SourceLocation TypeArgsLAngleLoc,
                             ArrayRef<TypeSourceInfo *> TypeArgs,
                             SourceLocation TypeArgsRAngleLoc,
                             SourceLocation ProtocolLAngleLoc,
                             ArrayRef<ObjCProtocolDecl *> Protocols,
                             ArrayRef<SourceLocation> ProtocolLocs,
                             SourceLocation ProtocolRAngleLoc,
                             bool FailOnError = false);

/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). 
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); void deduceOpenCLAddressSpace(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. 
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); 
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; /// Check whether the declared result type of the given Objective-C /// method declaration is compatible with the method's class. 
ResultTypeCompatibilityKind checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method, const ObjCInterfaceDecl *CurrentClass); void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method, ObjCMethodDecl *overridden); void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaAlignPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaAlignPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). 
void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispMode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, NamedDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// Are precise floating point semantics currently enabled? bool isPreciseFPEnabled() { return !CurFPFeatures.getAllowFPReassociate() && !CurFPFeatures.getNoSignedZero() && !CurFPFeatures.getAllowReciprocal() && !CurFPFeatures.getAllowApproxFunc(); } void ActOnPragmaFPEvalMethod(SourceLocation Loc, LangOptions::FPEvalMethodKind Value); /// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action, PragmaFloatControlKind Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. 
void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC); /// Called on well formed /// \#pragma clang fp reassociate void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled); /// ActOnPragmaFenvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled); /// Called on well formed '\#pragma clang fp' that has option 'exceptions'. void ActOnPragmaFPExceptions(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// Called to set constant rounding mode for floating point operations. void setRoundingMode(SourceLocation Loc, llvm::RoundingMode); /// Called to set exception behavior for floating point operations. 
void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// a the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. 
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, bool IsPackExpansion); void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, Expr *OE); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ParamExpr); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E); /// AddAnnotationAttr - Adds an annotation Annot with Args arguments to D. void AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Annot, MutableArrayRef<Expr *> Args); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. 
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
                         Expr *MaxThreads, Expr *MinBlocks);

/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
                 bool InInstantiation = false);

void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
                         ParameterABI ABI);

enum class RetainOwnershipKind {NS, CF, OS};

void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
                      RetainOwnershipKind K, bool IsTemplateInstantiation);

/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
                                    Expr *Min, Expr *Max);

/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
                             Expr *Min, Expr *Max);

bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);

//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
                             StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
                                    bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
                                      UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
                             bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
/// Lookup
'coroutine_traits' in std namespace and std::experimental /// namespace. The namespace found is recorded in Namespace. ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc, NamespaceDecl *&Namespace); /// Check that the expression co_await promise.final_suspend() shall not be /// potentially-throwing. bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; struct DeclareTargetContextInfo { struct MapInfo { OMPDeclareTargetDeclAttr::MapTypeTy MT; SourceLocation Loc; }; /// Explicitly listed variables and functions in a 'to' or 'link' clause. llvm::DenseMap<NamedDecl *, MapInfo> ExplicitlyMapped; /// The 'device_type' as parsed from the clause. OMPDeclareTargetDeclAttr::DevTypeTy DT = OMPDeclareTargetDeclAttr::DT_Any; /// The directive kind, `begin declare target` or `declare target`. OpenMPDirectiveKind Kind; /// The directive with indirect clause. Optional<Expr *> Indirect; /// The directive location. SourceLocation Loc; DeclareTargetContextInfo(OpenMPDirectiveKind Kind, SourceLocation Loc) : Kind(Kind), Loc(Loc) {} }; /// Number of nested '#pragma omp declare target' directives. SmallVector<DeclareTargetContextInfo, 4> DeclareTargetNesting; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true, bool SuppressExprDiags = false); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Returns the number of scopes associated with the construct on the given /// OpenMP level. 
int getNumberOfConstructScopes(unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Analyzes and checks a loop nest for use by a loop transformation. /// /// \param Kind The loop transformation directive kind. /// \param NumLoops How many nested loops the directive is expecting. /// \param AStmt Associated statement of the transformation directive. /// \param LoopHelpers [out] The loop analysis result. /// \param Body [out] The body code nested in \p NumLoops loop. /// \param OriginalInits [out] Collection of statements and declarations that /// must have been executed/declared before entering the /// loop. /// /// \return Whether there was any error. bool checkTransformableLoopNest( OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops, SmallVectorImpl<OMPLoopBasedDirective::HelperExprs> &LoopHelpers, Stmt *&Body, SmallVectorImpl<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>> &OriginalInits); /// Helper to keep information about the current `omp begin/end declare /// variant` nesting. struct OMPDeclareVariantScope { /// The associated OpenMP context selector. OMPTraitInfo *TI; /// The associated OpenMP context selector mangling. std::string NameSuffix; OMPDeclareVariantScope(OMPTraitInfo &TI); }; /// Return the OMPTraitInfo for the surrounding scope, if any. OMPTraitInfo *getOMPTraitInfoForSurroundingScope() { return OMPDeclareVariantScopes.empty() ? nullptr : OMPDeclareVariantScopes.back().TI; } /// The current `omp begin/end declare variant` scopes. SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes; /// The current `omp begin/end assumes` scopes. SmallVector<AssumptionAttr *, 4> OMPAssumeScoped; /// All `omp assumes` we encountered so far. 
SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal; public: /// The declarator \p D defines a function in the scope \p S which is nested /// in an `omp begin/end declare variant` scope. In this method we create a /// declaration for \p D and rename \p D according to the OpenMP context /// selector of the surrounding scope. Return all base functions in \p Bases. void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope( Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, SmallVectorImpl<FunctionDecl *> &Bases); /// Register \p D as specialization of all base functions in \p Bases in the /// current `omp begin/end declare variant` scope. void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope( Decl *D, SmallVectorImpl<FunctionDecl *> &Bases); /// Act on \p D, a function definition inside of an `omp [begin/end] assumes`. void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D); /// Can we exit an OpenMP declare variant scope at the moment. bool isInOpenMPDeclareVariantScope() const { return !OMPDeclareVariantScopes.empty(); } /// Given the potential call expression \p Call, determine if there is a /// specialization via the OpenMP declare variant mechanism available. If /// there is, return the specialized call expression, otherwise return the /// original \p Call. ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig); /// Handle a `omp begin declare variant`. void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI); /// Handle a `omp end declare variant`. void ActOnOpenMPEndDeclareVariant(); /// Checks if the variant/multiversion functions are compatible. 
bool areMultiversionVariantFunctionsCompatible( const FunctionDecl *OldFD, const FunctionDecl *NewFD, const PartialDiagnostic &NoProtoDiagID, const PartialDiagnosticAt &NoteCausedDiagIDAt, const PartialDiagnosticAt &NoSupportDiagIDAt, const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported, bool ConstexprSupported, bool CLinkageMayDiffer); /// Function tries to capture lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a VD should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. /// \param OpenMPCaptureLevel Capture level within an OpenMP construct. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, unsigned OpenMPCaptureLevel) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// If the current region is a range loop-based region, mark the start of the /// loop construct. void startOpenMPCXXRangeFor(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level, unsigned CapLevel) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. 
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; /// Check if the specified global variable must be captured by outer capture /// regions. /// \param Level Relative level of nested OpenMP construct for that /// the check is performed. bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); /// Called on well-formed '\#pragma omp metadirective' after parsing /// of the associated statement. StmtResult ActOnOpenMPMetaDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. 
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp [begin] assume[s]'. void ActOnOpenMPAssumesDirective(SourceLocation Loc, OpenMPDirectiveKind DKind, ArrayRef<std::string> Assumptions, bool SkippedClauses); /// Check if there is an active global `omp begin assumes` directive. bool isInOpenMPAssumeScope() const { return !OMPAssumeScoped.empty(); } /// Check if there is an active global `omp assumes` directive. bool hasGlobalOpenMPAssumes() const { return !OMPAssumeGlobal.empty(); } /// Called on well-formed '#pragma omp end assumes'. void ActOnOpenMPEndAssumesDirective(); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on Requires directive OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. 
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const; const ValueDecl *getOpenMPDeclareMapperVarName() const; /// Called on the start of target region i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI); /// Called at the end of target region i.e. '#pragma omp end declare target'. 
const DeclareTargetContextInfo ActOnOpenMPEndDeclareTargetDirective(); /// Called once a target context is completed, that can be when a /// '#pragma omp end declare target' was encountered or when a /// '#pragma omp declare target' without declaration-definition-seq was /// encountered. void ActOnFinishedOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI); /// Searches for the provided declaration name for OpenMP declare target /// directive. NamedDecl *lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT, DeclareTargetContextInfo &DTCI); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Finishes analysis of the deferred functions calls that may be declared as /// host/nohost during device/host compilation. void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller, const FunctionDecl *Callee, SourceLocation Loc); /// Return true if currently in OpenMP task with untied clause context. bool isInOpenMPTaskUntiedContext() const; /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return !DeclareTargetNesting.empty(); } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// Called for syntactical loops (ForStmt or CXXForRangeStmt) associated to /// an OpenMP loop directive. 
StmtResult ActOnOpenMPCanonicalLoop(Stmt *AStmt); /// Process a canonical OpenMP loop nest that can either be a canonical /// literal loop (ForStmt or CXXForRangeStmt), or the generated loop of an /// OpenMP loop transformation construct. StmtResult ActOnOpenMPLoopnest(Stmt *AStmt); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '#pragma omp tile' after parsing of its clauses and /// the associated statement. StmtResult ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '#pragma omp unroll' after parsing of its clauses /// and the associated statement. StmtResult ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. 
StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp depobj'. StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp scan'. 
StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop simd' after parsing of /// the associated statement. 
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. 
StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. 
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp interop'.
StmtResult ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses,
                                       SourceLocation StartLoc,
                                       SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp dispatch' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp masked' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMaskedDirective(ArrayRef<OMPClause *> Clauses,
                                      Stmt *AStmt, SourceLocation StartLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp loop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPGenericLoopDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
                               SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
                           OpenMPLinearClauseKind LinKind, QualType Type,
                           bool IsDeclareSimd = false);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); /// Checks '\#pragma omp declare variant' variant function and original /// functions after parsing of the associated method/function. /// \param DG Function declaration to which declare variant directive is /// applied to. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \param TI The trait info object representing the match clause. /// \param NumAppendArgs The number of omp_interop_t arguments to account for /// in checking. /// \returns None, if the function/variant function are not compatible with /// the pragma, pair of original function/variant ref expression otherwise. Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef, OMPTraitInfo &TI, unsigned NumAppendArgs, SourceRange SR); /// Called on well-formed '\#pragma omp declare variant' after parsing of /// the associated method/function. /// \param FD Function declaration to which declare variant directive is /// applied to. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \param TI The context traits associated with the function variant. /// \param AdjustArgsNothing The list of 'nothing' arguments. /// \param AdjustArgsNeedDevicePtr The list of 'need_device_ptr' arguments. /// \param AppendArgs The list of 'append_args' arguments. /// \param AdjustArgsLoc The Location of an 'adjust_args' clause. /// \param AppendArgsLoc The Location of an 'append_args' clause. /// \param SR The SourceRange of the 'declare variant' directive. 
void ActOnOpenMPDeclareVariantDirective(
    FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI,
    ArrayRef<Expr *> AdjustArgsNothing,
    ArrayRef<Expr *> AdjustArgsNeedDevicePtr,
    ArrayRef<OMPDeclareVariantAttr::InteropType> AppendArgs,
    SourceLocation AdjustArgsLoc, SourceLocation AppendArgsLoc,
    SourceRange SR);

OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
                               Expr *Condition, SourceLocation StartLoc,
                               SourceLocation LParenLoc,
                               SourceLocation NameModifierLoc,
                               SourceLocation ColonLoc,
                               SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
/// Called on well-formed 'align' clause.
OMPClause *ActOnOpenMPAlignClause(Expr *Alignment, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'sizes' clause.
OMPClause *ActOnOpenMPSizesClause(ArrayRef<Expr *> SizeExprs,
                                  SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);
/// Called on well-formed 'full' clause.
OMPClause *ActOnOpenMPFullClause(SourceLocation StartLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'partial' clause.
OMPClause *ActOnOpenMPPartialClause(Expr *FactorExpr, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
                         SourceLocation LParenLoc = SourceLocation(),
                         Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'detach' clause.
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);

OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument,
                                   SourceLocation ArgumentLoc,
                                   SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'when' clause.
OMPClause *ActOnOpenMPWhenClause(OMPTraitInfo &TI, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
                                    SourceLocation KindLoc,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'order' clause. OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. 
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'compare' clause. OMPClause *ActOnOpenMPCompareClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acq_rel' clause. OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acquire' clause. OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'release' clause. OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'relaxed' clause. OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'init' clause. OMPClause *ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs, bool IsTarget, bool IsTargetSync, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'use' clause. OMPClause *ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'destroy' clause. OMPClause *ActOnOpenMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'novariants' clause. OMPClause *ActOnOpenMPNovariantsClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'nocontext' clause. 
OMPClause *ActOnOpenMPNocontextClause(Expr *Condition,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed 'filter' clause.
OMPClause *ActOnOpenMPFilterClause(Expr *ThreadID, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
                                                SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
                                              SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit, SourceLocation ExtraModifierLoc, ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc); /// Called on well-formed 'inclusive' clause. OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'exclusive' clause. OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause( ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. 
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. 
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depobj' pseudo clause. OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause *ActOnOpenMPMapClause( ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, bool NoDiagnose = false, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. 
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause * ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'use_device_addr' clause. OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'nontemporal' clause. 
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);

/// Data for list of allocators.
struct UsesAllocatorsData {
  /// Allocator.
  Expr *Allocator = nullptr;
  /// Allocator traits.
  Expr *AllocatorTraits = nullptr;
  /// Locations of '(' and ')' symbols.
  SourceLocation LParenLoc, RParenLoc;
};
/// Called on well-formed 'uses_allocators' clause.
OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc,
                                          ArrayRef<UsesAllocatorsData> Data);
/// Called on well-formed 'affinity' clause.
OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation ColonLoc,
                                     SourceLocation EndLoc, Expr *Modifier,
                                     ArrayRef<Expr *> Locators);
/// Called on a well-formed 'bind' clause.
OMPClause *ActOnOpenMPBindClause(OpenMPBindClauseKind Kind,
                                 SourceLocation KindLoc,
                                 SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc);

/// The kind of conversion being performed.
enum CheckedConversionKind {
  /// An implicit conversion.
  CCK_ImplicitConversion,
  /// A C-style cast.
  CCK_CStyleCast,
  /// A functional-style cast.
  CCK_FunctionalCast,
  /// A cast other than a C-style cast.
  CCK_OtherCast,
  /// A conversion for an operand of a builtin overloaded operator.
  CCK_ForBuiltinOverloadedOp
};

/// Returns true for the explicit cast kinds (C-style, functional-style, and
/// other casts); false for implicit conversions and conversions of operands
/// of builtin overloaded operators.
static bool isCast(CheckedConversionKind CCK) {
  return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
         CCK == CCK_OtherCast;
}

/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_PRValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This function is a no-op if the operand has a function type // or an array type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. 
In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check whether the given statement can have musttail applied to it, /// issuing a diagnostic and returning false if not. In the success case, /// the statement is rewritten to remove implicit nodes from the return /// value. bool checkAndRewriteMustTailAttr(Stmt *St, const Attr &MTA); private: /// Check whether the given statement can have musttail applied to it, /// issuing a diagnostic and returning false if not. bool checkMustTailAttr(const Stmt *St, const Attr &MTA); public: /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collector argument expressions for various /// form of call prototypes. 
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); /// Context in which we're performing a usual arithmetic conversion. enum ArithConvKind { /// An arithmetic operation. ACK_Arithmetic, /// A bitwise operation. ACK_BitwiseOp, /// A comparison. ACK_Comparison, /// A conditional (?:) operator. ACK_Conditional, /// A compound assignment expression. ACK_CompAssign, }; // UsualArithmeticConversions - performs the UsualUnaryConversions on it's // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, ArithConvKind ACK); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. 
IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointers types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatibleFunctionPointer - The assignment is between two function /// pointers types that are not compatible, but we accept them as an /// extension. IncompatibleFunctionPointer, /// IncompatiblePointerSign - The assignment is between two pointers types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. /// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. 
IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointers types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. 
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. 
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_PRValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
    Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc,
    QualType CompoundType);

ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
                                   UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
                                       BinaryOperatorKind Opcode, Expr *LHS,
                                       Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);

QualType CheckConditionalOperands( // C99 6.5.15
    ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
    ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
    ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK,
    ExprObjectKind &OK, SourceLocation questionLoc);
QualType CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
                                     ExprResult &RHS,
                                     SourceLocation QuestionLoc);

QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
                                  bool ConvertArgs = true);
/// Convenience overload: unwraps the ExprResults, delegates to the Expr*&
/// overload above, then stores the (possibly converted) expressions back
/// into the ExprResults before returning the composite type.
QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1,
                                  ExprResult &E2, bool ConvertArgs = true) {
  Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
  QualType Composite =
      FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
  E1 = E1Tmp;
  E2 = E2Tmp;
  return Composite;
}

QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation QuestionLoc);

bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
                                SourceLocation QuestionLoc);

void DiagnoseAlwaysNonNullPointer(Expr *E,
                                  Expr::NullPointerConstantKind NullType,
                                  bool IsEqual, SourceRange Range);

/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); /// Type checking for matrix binary operators. QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); bool isValidSveBitcast(QualType srcType, QualType destType); bool areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy); bool areVectorTypesSameSize(QualType srcType, QualType destType); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; // Fake up a scoped enumeration that still contextually converts to bool. 
struct ReferenceConversionsScope {
  /// The conversions that would be performed on an lvalue of type T2 when
  /// binding a reference of type T1 to it, as determined when evaluating
  /// whether T1 is reference-compatible with T2.
  enum ReferenceConversions {
    Qualification = 0x1,
    NestedQualification = 0x2,
    Function = 0x4,
    DerivedToBase = 0x8,
    ObjC = 0x10,
    ObjCLifetime = 0x20,

    LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
  };
};
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;

ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
                             ReferenceConversions *Conv = nullptr);

ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
                               Expr *CastExpr, CastKind &CastKind,
                               ExprValueKind &VK, CXXCastPath &Path);

/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);

/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result,
                              QualType &paramType);

// CheckMatrixCast - Check type constraints for matrix casts.
// We allow casting between matrices of the same dimensions i.e. when they
// have the same number of rows and column. Returns true if the cast is
// invalid.
bool CheckMatrixCast(SourceRange R, QualType DestTy, QualType SrcTy,
                     CastKind &Kind);

// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                     CastKind &Kind);

/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);

// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. 
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. 
void EmitRelatedResultTypeNoteForReturn(QualType destType);

/// The result of checking a condition expression (for 'if', 'while', 'for',
/// 'switch', or 'if constexpr'): bundles the optional condition variable,
/// the converted condition expression, an invalid flag, and - when the
/// condition is constexpr and non-value-dependent - its known boolean value.
class ConditionResult {
  Decl *ConditionVar;
  FullExprArg Condition;
  bool Invalid;
  bool HasKnownValue;
  bool KnownValue;

  friend class Sema;
  ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
                  bool IsConstexpr)
      : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
        HasKnownValue(IsConstexpr && Condition.get() &&
                      !Condition.get()->isValueDependent()),
        KnownValue(HasKnownValue &&
                   !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
  explicit ConditionResult(bool Invalid)
      : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
        HasKnownValue(false), KnownValue(false) {}

public:
  ConditionResult() : ConditionResult(false) {}
  bool isInvalid() const { return Invalid; }
  std::pair<VarDecl *, Expr *> get() const {
    return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
                          Condition.get());
  }
  /// Returns the compile-time-known boolean value of the condition, or None
  /// when the condition was not constexpr (or was value-dependent).
  llvm::Optional<bool> getKnownValue() const {
    if (!HasKnownValue)
      return None;
    return KnownValue;
  }
};
static ConditionResult ConditionError() { return ConditionResult(true); }

enum class ConditionKind {
  Boolean,     ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
  ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
  Switch       ///< An integral condition for a 'switch' statement.
};

QualType PreferredConditionType(ConditionKind K) const {
  return K == ConditionKind::Switch ? Context.IntTy : Context.BoolTy;
}

ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr,
                               ConditionKind CK, bool MissingOK = false);

ConditionResult ActOnConditionVariable(Decl *ConditionVar,
                                       SourceLocation StmtLoc,
                                       ConditionKind CK);

DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);

ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                  SourceLocation StmtLoc, ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);

/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
                                 bool IsConstexpr = false);

/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);

/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);

/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);

/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);

/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);

/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
                                        unsigned NewWidth, bool NewSign,
                                        SourceLocation Loc, unsigned DiagID);

/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);

/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual SemaDiagnosticBuilder diagnoseNotICEType(Sema &S, SourceLocation Loc, QualType T); virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S, SourceLocation Loc) = 0; virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc); virtual ~VerifyICEDiagnoser() {} }; enum AllowFoldKind { NoFold, AllowFold, }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, AllowFoldKind CanFold = NoFold); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, AllowFoldKind CanFold = NoFold); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr, AllowFoldKind CanFold = NoFold); ExprResult VerifyIntegerConstantExpression(Expr *E, AllowFoldKind CanFold = NoFold) { return VerifyIntegerConstantExpression(E, nullptr, CanFold); } /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. /// Can optionally return whether the bit-field is of width 0 ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. 
Returns false if the count is 0 /// before decrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics /// unless \p EmitOnBothSides is true. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately. /// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. 
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. SemaDiagnosticBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately. /// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the host, emits the diagnostics immediately. /// - If CurContext is a non-host function, just ignore it. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. 
SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD); SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD = nullptr); SemaDiagnosticBuilder targetDiag(SourceLocation Loc, const PartialDiagnostic &PD, FunctionDecl *FD = nullptr) { return targetDiag(Loc, PD.getDiagID(), FD) << PD; } /// Check if the type is allowed to be used for the current target. void checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D = nullptr); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); enum CUDAVariableTarget { CVT_Device, /// Emitted on device side with a shadow variable on host side CVT_Host, /// Emitted on host side only CVT_Both, /// Emitted on both sides with different addresses CVT_Unified, /// Emitted as a unified address, e.g. managed variables }; /// Determines whether the given variable is emitted on host or device side. CUDAVariableTarget IdentifyCUDATarget(const VarDecl *D); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D); // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. 
CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); /// May add implicit CUDAConstantAttr attribute to VD, depending on VD /// and current compilation settings. void MaybeAddCUDAConstantAttr(VarDecl *VD); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. 
/// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas are by default host device functions unless they have an /// explicit host or device attribute. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given an implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. 
// // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. 
PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); enum class AttributeCompletion { Attribute, Scope, None, }; void CodeCompleteAttribute( AttributeCommonInfo::Syntax Syntax, AttributeCompletion Completion = AttributeCompletion::Attribute, const IdentifierInfo *Scope = 
nullptr); /// Determines the preferred type of the current function argument, by /// examining the signatures of all possible overloads. /// Returns null if unknown or ambiguous, or if code completion is off. /// /// If the code completion point has been reached, also reports the function /// signatures that were considered. /// /// FIXME: rename to GuessCallArgumentType to reduce confusion. QualType ProduceCallSignatureHelp(Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc, bool Braced); QualType ProduceCtorInitMemberSignatureHelp( Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc, bool Braced); QualType ProduceTemplateArgumentSignatureHelp( TemplateTy, ArrayRef<ParsedTemplateArgument>, SourceLocation LAngleLoc); void CodeCompleteInitializer(Scope *S, Decl *D); /// Trigger code completion for a record of \p BaseType. \p InitExprs are /// expressions in the initializer list seen so far and \p D is the current /// Designation being parsed. 
void CodeCompleteDesignator(const QualType BaseType, llvm::ArrayRef<Expr *> InitExprs, const Designation &D); void CodeCompleteAfterIf(Scope *S, bool IsBracedThen); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, bool IsUsingDeclaration, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteAfterFunctionEquals(Declarator &D); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void 
CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void 
CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl, StringRef ParamName, QualType ArgTy, QualType ParamTy); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr 
*CoprocArg, bool WantCDE); bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum); bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinComplex(CallExpr *TheCall); bool SemaBuiltinVSX(CallExpr *TheCall); bool 
SemaBuiltinOSLogFormat(CallExpr *TheCall); bool SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinArithmeticFence(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum); bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID, const char *TypeDesc); bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc); bool SemaBuiltinElementwiseMath(CallExpr *TheCall); bool PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall); bool 
PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall); // Matrix builtin handling. ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, ExprResult CallResult); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckFreeArguments(const CallExpr *E); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, 
SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(const Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void CheckTCBEnforcement(const CallExpr *TheCall, const FunctionDecl *Callee); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. 
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Peform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Nullable_result = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; bool isCFError(RecordDecl *D); /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. 
/// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// Determine the number of levels of enclosing template parameters. This is /// only usable while parsing. Note that this does not include dependent /// contexts in which no template parameters have yet been declared, such as /// in a terse function template or generic lambda before the first 'auto' is /// encountered. unsigned getTemplateDepth(Scope *S) const; /// To be used for checking whether the arguments being passed to /// function exceeds the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. 
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions; private: int ParsingClassDepth = 0; class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. 
void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the sef of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurLexicalContext is a kernel function or it is known that the /// function will be emitted for the device, emits the diagnostics /// immediately. /// - If CurLexicalContext is a function and we are compiling /// for the device, but we don't know that this function will be codegen'ed /// for devive yet, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// Diagnose __float128 type usage only from SYCL device code if the current /// target doesn't support it /// if (!S.Context.getTargetInfo().hasFloat128Type() && /// S.getLangOpts().SYCLIsDevice) /// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128"; SemaDiagnosticBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Check whether we're allowed to call Callee from the current context. 
/// /// - If the call is never allowed in a semantically-correct program /// emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed, creates a deferred diagnostic to be emitted if /// and when the caller is codegen'ed, and returns true. /// /// - Otherwise, returns true without emitting any diagnostics. /// /// Adds Callee to DeviceCallGraph if we don't know if its caller will be /// codegen'ed yet. bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee); void deepTypeCheckForSYCLDevice(SourceLocation UsedAt, llvm::DenseSet<QualType> Visited, ValueDecl *DeclToCheck); }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context. 
if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; template <> void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, AlignPackInfo Value); } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation. template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getHashValue()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
GB_unaryop__identity_int32_int16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_int32_int16 // op(A') function: GB_tran__identity_int32_int16 // C type: int32_t // A type: int16_t // cast: int32_t cij = (int32_t) aij // unaryop: cij = aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ int32_t z = (int32_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_int32_int16 ( int32_t *restrict Cx, const int16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, 
p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_int32_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
nest.c
#include<stdio.h>
#include<omp.h>

/*
 * Demonstrates nested OpenMP parallelism.
 *
 * The original version placed a bare "#pragma omp for" inside another
 * "#pragma omp for" within the SAME parallel region.  The OpenMP
 * specification forbids a worksharing region closely nested inside
 * another worksharing region, so the program was non-conforming (hence
 * the old "DOESN'T WORK!!" note).  The fix makes the inner loop its own
 * nested parallel region ("#pragma omp parallel for"), which is exactly
 * what omp_set_nested(1) is meant to enable.
 *
 * Reads a single integer n from stdin and prints one greeting per
 * (i, j) pair, 0 <= i, j < n.
 */
int main(int argc, char *argv[]){
    int nThreads = 4;
    omp_set_num_threads(nThreads);
    omp_set_nested(1);                 /* allow nested parallel regions */

    int n = 0;
    if(scanf("%d", &n) != 1){          /* validate input before looping */
        fprintf(stderr, "expected an integer on stdin\n");
        return 1;
    }

    #pragma omp parallel
    {
        /* outer worksharing loop: iterations of i are divided among the
         * threads of the enclosing parallel region */
        #pragma omp for
        for(int i = 0; i < n; i++){
            /* inner loop spawns a NESTED parallel region per i, rather
             * than an (illegal) nested worksharing construct */
            #pragma omp parallel for
            for(int j = 0; j < n; j++){
                printf("Hello from thread #%d iteration i#%d j#%d\n", omp_get_thread_num(), i, j);
            }
        }
    }
    return 0;
}
second_implementation.c
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <string.h>

#define BUFF_SIZE 50
#define MAXSIZE 100
#define MAXLINE 50
#define N_EXEC 100   // number of k-means restarts per evaluation
#define N_FOLD 10    // number of folds for cross validation
#define K_MAX 10     // max number of clusters tried
#define CHUNKSIZE 10
#define IMPL 2

// FIX: sized explicitly (BUFF_SIZE) instead of by its initializer
// (19 bytes), so copying argv[1] into it cannot overflow.
char FILEPATH[BUFF_SIZE] = "data/iris_high.txt";
int k = 2;  // current number of clusters (global, read by most helpers)
int threads, chunkn, chunkm;

// A dataset: dim records, each with atts float attributes.
struct data {
    int dim;
    int atts;
    float** data;
};

// Result of one k-means run: its SSE and the k centroids found.
struct history {
    float SSE;
    float** centroids;
};

float calcSilhouette(float** dataset, int **clusters, float** centroids, int n, int m);
struct data loadDataset(char* fileName, char* dist);
void normalize(struct data* dataset);
void datasetSubSets(struct data dataset, int fold, struct data* trainingSet, struct data* testSet);
float mainAlgo(struct data training, struct data test, int flagFinal);
void kmeans (struct data structure, int numIte, float tol, struct history* recordStoria);
void copySubMatrix(float** centroids, float** dataset, int *ranNum, int m);
void randomIndexes(int *ranNum, int n);
void zeroClusters(int **clusters, int n);
void findClusters(float** dataset, int **clusters, float** centroids, int n, int m);
float calcSSE(float** dataset, int **clusters, float** centroids, int n, int m);
void freeArray(float **a, int n);
void copyMatrix(float **mat1, float **mat2, int row, int col);
void printData(struct data dataset);
void getRow(float **matrix, int row, float *array, int m);
float eucliDist(float *rec1, float *rec2, int m);
void findCentroids(float** centroids, int **clusters, float** dataset, int n, int m);
void printClusters(int **clusters, int n);
void printCentroids(float** centroids, int m);
void writeFile(float** data, int **clusters, int n, int m);
void freeArrayInt(int **a, int n);
int omp_thread_count();

//////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////

// Entry point: loads <argv[1]>.csv, normalizes it, selects the best k in
// [2, K_MAX] by N_FOLD-fold cross-validated silhouette, then reruns
// k-means with the best k on the whole dataset and writes the clustering.
int main (int argc, char *argv[])
{
    threads = omp_thread_count();

    // FIX: argv[1] was dereferenced unconditionally.
    if (argc < 2) {
        fprintf(stderr, "usage: %s <dataset-name>\n", argv[0]);
        return 1;
    }
    // FIX: bounded copies — the originals used strcpy/strcat into
    // fixed-size buffers with no length check.
    snprintf(FILEPATH, sizeof FILEPATH, "%s", argv[1]);
    char file[BUFF_SIZE];
    snprintf(file, sizeof file, "../data/%s.csv", argv[1]);

    double begin = omp_get_wtime();
    double end;
    struct data dataset = loadDataset(file, "\t");

    chunkn = dataset.dim / threads;
    chunkm = (dataset.atts / threads) + 1;
    // FIX: if dim < threads, chunkn was 0 and schedule(static, 0) is invalid.
    if (chunkn < 1) chunkn = 1;
    printf("\nCHUNKN: %d - CHUNKM: %d\n", chunkn, chunkm);

    normalize(&dataset);
    printf("DIM: %d\n", dataset.dim);

    struct data trainingSet;
    struct data testSet;
    int bestk = 2;
    float sumSil[ K_MAX-1 ], appSIL;  // (unused locals appSSE/SIL removed)

    for(k = 2; k<=K_MAX; k++) {
        printf("\nAnalizing for k = %d", k);
        sumSil[k-2] = 0;
        // N_FOLD-fold cross validation: average silhouette over the folds
        for(int fold=0; fold<N_FOLD; fold++) {
            trainingSet.data = (float**) calloc (dataset.dim - (dataset.dim / N_FOLD), sizeof(float*));
            testSet.data = (float**) calloc (dataset.dim / N_FOLD, sizeof(float*));
            datasetSubSets(dataset, fold, &trainingSet, &testSet);
            sumSil[k-2] += mainAlgo(trainingSet, testSet, 0);
            if(fold<N_FOLD-1){
                freeArray(trainingSet.data, trainingSet.dim);
                freeArray(testSet.data, testSet.dim);
            }
        }
        sumSil[k-2] = sumSil[k-2]/N_FOLD;
        printf("\nSilhouette: %f",sumSil[k-2]);

        // track the k with the highest average silhouette
        if(k==2) appSIL = sumSil[k-2];
        if(sumSil[k-2] > appSIL) {
            bestk = k;
            appSIL = sumSil[k-2];
        }
        freeArray(trainingSet.data, trainingSet.dim);
        freeArray(testSet.data, testSet.dim);

        end = omp_get_wtime();
        double time_spent = (end - begin);
        printf("\nTime from start: %lf sec \n------------------------", time_spent);
    }
    printf("\n best k is: %d with Silhuette: %f", bestk, appSIL);

    // Final run with the chosen k, on the whole dataset, writing output.
    k = bestk;
    mainAlgo(dataset, dataset, 1);
    end = omp_get_wtime();

    FILE* fd = fopen("tempi.txt", "a");
    if (fd != NULL) {   // FIX: fopen result was unchecked
        // FIX: omp_get_num_threads() outside a parallel region always
        // returns 1; log the measured thread count instead.
        fprintf(fd,"\n%lf sec\t%d\t%s\t%d",(end - begin), threads, FILEPATH, IMPL);
        fclose(fd);     // FIX: the handle was leaked
    }
    printf("\nk_max= %d, Total time: %lf sec\n",K_MAX,(end - begin));
    return 0;
}

// Runs N_EXEC random restarts of k-means on `training`, keeps the run
// with the lowest SSE, then clusters `test` with those centroids and
// returns the silhouette score.  flagFinal==1 additionally prints the
// centroids and writes the clustering to disk.
float mainAlgo(struct data training, struct data test, int flagFinal)
{
    struct history bestStoria;
    int** bestClusters =(int**) calloc(test.dim, sizeof(int*));
    bestStoria.centroids = (float**) malloc(k * sizeof(float*));
    for(int i=0; i<test.dim; i++)
        bestClusters[i] =(int*) calloc(k, sizeof(int));
    for(int i=0; i<k; i++)
        bestStoria.centroids[i] =(float*) malloc(training.atts *sizeof(float));
    // atts*dim is an upper bound for the SSE of normalized data, so the
    // first restart always wins.
    bestStoria.SSE = training.atts * training.dim;

    struct history storia;
    storia.centroids = (float**) calloc(k, sizeof(float*));
    for(int i=0; i<k; i++) {
        storia.centroids[i] =(float*) calloc(training.atts, sizeof(float));
    }

    // NOTE(review): reseeding with time(NULL) here makes calls within the
    // same second reuse the same restart sequence — consider seeding once.
    srand( time(NULL) );
    for(int i=0; i<N_EXEC; i++) {
        kmeans(training, 5000, 0.001, &storia);
        if(storia.SSE <= bestStoria.SSE) {
            copyMatrix(bestStoria.centroids, storia.centroids, k, training.atts);
            bestStoria.SSE = storia.SSE;
        }
    }

    zeroClusters(bestClusters, test.dim);   // reset best clusters matrix
    findClusters(test.data, bestClusters, bestStoria.centroids, test.dim, test.atts);

    // print last iteration results
    if(flagFinal == 1) {
        printCentroids(bestStoria.centroids, test.atts);
        writeFile(test.data, bestClusters, test.dim, test.atts);
    }

    // (unused SSEtrovato/supportSSE locals removed)
    float silTrovata = calcSilhouette(test.data, bestClusters, bestStoria.centroids, test.dim, test.atts);

    freeArray(bestStoria.centroids, k);
    freeArray(storia.centroids, k);
    freeArrayInt(bestClusters, test.dim);
    return silTrovata;
}

// Silhouette-style score for a clustering: for each cluster, the mean
// intra-cluster centroid distance (avgi) against the lowest observed
// inter-cluster distance, averaged over the k clusters.
float calcSilhouette(float** dataset, int **clusters, float** centroids, int n, int m){
    float supDataset[m], supCentroid[m], avgi[k], avge[k],
          minAvge = 10, sil, meansil = 0;
    int trovato = 0;
    int ci = 0;
    for(int ki=0; ki<k; ki++){
        getRow(centroids, ki, supCentroid, m);
        avgi[ki] = 0;
        avge[ki] = 0;   // FIX: was accumulated (+=) without ever being initialized (UB)
        for(int i=0;i<n;i++){
            getRow(dataset, i, supDataset, m);
            ci += clusters[i][ki];
            avgi[ki] += eucliDist(supCentroid, supDataset, m) * clusters[i][ki];
            if(trovato==0)
                avge[ki] += eucliDist(supCentroid, supDataset, m) * (1 - clusters[i][ki]);
            if(clusters[i][ki]==0)
                trovato = 1;
            if(clusters[i][ki] == 0 && minAvge > eucliDist(supCentroid, supDataset, m))
                minAvge = eucliDist(supCentroid, supDataset, m);
        }
        if(ci!=0)
            avgi[ki] = avgi[ki] / ci;
        trovato = 0;
        ci=0;
    }

    float lowestAvge = minAvge;
    for(int ki=0;ki<k;ki++){
        // (dead stores to `max` removed — it was never read)
        if(lowestAvge>=avgi[ki]){
            sil = 1-(avgi[ki]/lowestAvge);
        }
        else{
            sil = (lowestAvge/avgi[ki])-1;
        }
        meansil += sil;
    }
    meansil = meansil / k;
    return meansil;
}

// One k-means run: random initial centroids, then alternate
// assign/update until the relative SSE improvement drops below `tol`
// or `numIte` iterations have passed.  Results go into *recordStoria.
void kmeans (struct data structure, int numIte, float tol, struct history* recordStoria)
{
    int n, m, *ranNum;
    n = structure.dim;
    m = structure.atts;
    ranNum = (int*) calloc(k, sizeof(int));
    // generate k random indexes to start from
    randomIndexes(ranNum, n);

    float** centroids = (float**) calloc(k, sizeof(float*));
    int** clusters =(int**) calloc(n, sizeof(int*));
    for(int i=0; i<n; i++) {
        clusters[i] =(int*) calloc(k, sizeof(int));
    }
    for(int i=0; i<k; i++) {
        centroids[i] =(float*) calloc(m, sizeof(float));
    }

    // saving initial centroids (rows of the dataset picked at random)
    copySubMatrix(centroids, structure.data, ranNum, m);

    // clusters[i][ki] == 1 iff record i belongs to cluster ki
    zeroClusters(clusters, n);
    findClusters(structure.data, clusters, centroids, n, m);

    int count = 0;
    float newSSE, currSSE;
    float** supCentroids = (float**) calloc(k, sizeof(float*));
    for(int j = 0; j < k; j++)
        supCentroids[j] = (float*) calloc(m, sizeof(float));

    do {
        currSSE = calcSSE(structure.data, clusters, centroids, n, m);
        copyMatrix(supCentroids, centroids, k, m);   // keep a copy in case SSE worsens
        findCentroids(centroids, clusters, structure.data, n, m);
        zeroClusters(clusters, n);
        findClusters(structure.data,clusters,centroids, n, m);
        newSSE = calcSSE(structure.data, clusters, centroids, n, m);
        count++;
    } while(count < numIte && ((currSSE-newSSE)/currSSE) > tol);

    // roll back the last update if it made the SSE worse
    if(newSSE > currSSE) {
        copyMatrix(centroids, supCentroids, k, m);
        newSSE = currSSE;
    }

    copyMatrix(recordStoria->centroids, centroids, k, m);
    recordStoria->SSE = newSSE;

    freeArray(centroids,k);
    freeArrayInt(clusters,n);
    freeArray(supCentroids,k);
    free(ranNum);
}

// Debug helper: dump every record of the dataset to stdout.
void printData(struct data dataset)
{
    printf("\n");
    for(int i = 0; i < dataset.dim; i++) {
        printf("%d\t", i + 1);
        for(int j = 0; j < dataset.atts; j++) {
            printf("%.2f\t", dataset.data[i][j]);
        }
        printf("\n");
    }
}

// Scale every column into [0, 1] by dividing by its column maximum.
// NOTE(review): a column whose maximum is 0 would divide by zero —
// assumed not to occur in the input data; verify if inputs change.
void normalize(struct data* dataset)
{
    int i, j;
    printf("Normalizing the data\n");
    float max[dataset->atts];
    // Look for max of each column
    for(i = 0; i < dataset->dim; i++) {
        for(j = 0; j < dataset->atts; j++) {
            if(i == 0) {
                max[j] = 0;
            }
            if(max[j] < dataset->data[i][j])
                max[j] = dataset->data[i][j];
        }
    }
    // Normalize the data by dividing each value by the max value of the column
    for(i = 0; i < dataset->dim; i++) {
        for(j = 0; j < dataset->atts; j++) {
            dataset->data[i][j] = dataset->data[i][j] / max[j];
        }
    }
}

// Split `dataset` into training/test sets for the given fold: records
// [init, end] become the test set, the rest the training set.  Rows are
// deep-copied so the subsets can be freed independently.
void datasetSubSets(struct data dataset, int fold, struct data* trainingSet, struct data* testSet)
{
    int init, end, apptr = 0, appte=0;
    init = fold * (dataset.dim / N_FOLD);
    end = ((fold + 1) * (dataset.dim / N_FOLD)) - 1;
    trainingSet->dim = 0;
    trainingSet->atts = dataset.atts;
    testSet->dim = 0;
    testSet->atts = dataset.atts;
    for (int i = 0; i < dataset.dim; i++) {
        if(i >= init && i <= end) {
            testSet->data[appte] = (float*) calloc(dataset.atts, sizeof(float));
            for(int u=0;u<dataset.atts;u++)
                testSet->data[appte][u] = dataset.data[i][u];
            appte++;
        }
        else {
            trainingSet->data[apptr] = (float*) calloc(dataset.atts, sizeof(float));
            for(int u=0; u<dataset.atts; u++)
                trainingSet->data[apptr][u] = dataset.data[i][u];
            apptr++;
        }
    }
    testSet->dim = appte;
    trainingSet->dim = apptr;
}

// Load a dataset: the first line holds the attribute count, each
// following line one record with `dist`-separated float attributes.
struct data loadDataset(char* fileName, char* dist)
{
    FILE *file;
    int max = MAXSIZE;
    struct data dataset;

    // Open file and check for I/O errors
    file = fopen(fileName, "r");
    if (file == NULL)
        exit(-1);
    else
        printf("Loading from file: %s\n", fileName);

    char buffer[MAXLINE];
    // Count the number of attributes
    if (fgets(buffer, sizeof(buffer), file)) {
        sscanf(buffer, "%d", &dataset.atts);
        printf("atts: %d\n", dataset.atts);
    }
    else {
        // FIX: dataset.atts was left uninitialized on an empty file
        fprintf(stderr, "empty dataset file\n");
        exit(-1);
    }

    // Read the actual data from the file
    dataset.dim = 0;
    dataset.data = (float**) calloc (MAXSIZE, sizeof(float*));
    while(fgets(buffer, sizeof(buffer), file)) {
        dataset.data[dataset.dim] = (float*) calloc(dataset.atts, sizeof(float));
        // Take the first token in the current line of data
        char *headapp = strtok(buffer, dist);
        int i = 0;
        do {
            dataset.data[dataset.dim][i] = atof(headapp);
            headapp = strtok(NULL, dist);
            i++;
        } while(headapp != NULL);
        dataset.dim++;
        // FIX: condition was '>', which let dataset.data[max] be written
        // one iteration before the grow — out-of-bounds write.
        if(dataset.dim >= max) {
            max += MAXSIZE;
            // FIX: the row array holds float* pointers; the old size
            // (sizeof(float) * atts * max) was wrong and could shrink it.
            float **tmp = (float**) realloc(dataset.data, sizeof(float*) * max);
            if (tmp == NULL) {
                fprintf(stderr, "out of memory\n");
                exit(-1);
            }
            dataset.data = tmp;
        }
    }
    fclose(file);   // FIX: the handle was leaked
    return dataset;
}

// Free an n-row matrix of floats (rows, then the row array).
void freeArray(float **a, int n)
{
    for (int i = 0; i < n; ++i) {
        free(a[i]);
    }
    free(a);
}

// Free an n-row matrix of ints (rows, then the row array).
void freeArrayInt(int **a, int n)
{
    for (int i = 0; i < n; ++i) {
        free(a[i]);
    }
    free(a);
}

// Element-wise copy: mat1 (dest) <- mat2 (source), row x col.
void copyMatrix(float **mat1, float **mat2, int row, int col){
    for (int i = 0; i < row; i++) {
        for (int j = 0; j < col; j++) {
            mat1[i][j] = mat2[i][j];
        }
    }
}

// Copy the k dataset records indexed by ranNum into `centroids`.
void copySubMatrix(float **centroids, float** dataset, int *ranNum, int m)
{
    for(int i=0; i<k; i++) {
        for(int j=0; j<m; j++)
            centroids[i][j] = dataset[ ranNum[i] ][j];
    }
}

// Sum, over all clusters, of each member record's distance to its
// centroid (the clusters matrix is 0/1, so the multiply selects members).
float calcSSE(float** dataset, int **clusters, float** centroids, int n, int m){
    float sum=0.0, supDataset[m], supCentroid[m];
    for(int ki=0; ki<k; ki++){
        getRow(centroids, ki, supCentroid, m);
        #pragma omp parallel for private(supDataset) reduction(+:sum) schedule(static, chunkn)
        for(int i=0;i<n;i++){
            getRow(dataset, i, supDataset, m);
            sum += eucliDist(supCentroid, supDataset, m) * clusters[i][ki];
        }
    }
    return sum;
}

// Debug helper: dump the record/cluster membership matrix.
void printClusters(int **clusters, int n){
    printf("\t\t");
    for(int ki=0;ki<k;ki++){
        printf("k%d\t",ki);
    }
    for(int i=0;i<n;i++){
        printf("\nrecord%d:\t",i);
        for(int j=0;j<k;j++){
            printf("%d\t",clusters[i][j]);
        }
    }
    printf("\n");
}

// Recompute each centroid as the mean of its cluster's member records.
// An empty cluster gets an all-zero centroid.
void findCentroids(float** centroids, int **clusters, float** dataset, int n, int m)
{
    int elemCluster=0;
    float record[m];
    for(int ki=0; ki<k; ki++) {
        // reset accumulator
        for(int p=0; p<m; p++) {
            record[p] = 0;
        }
        for(int i=0; i<n; i++) {
            if(clusters[i][ki]!=0) {
                elemCluster++;
                for(int j=0; j<m; j++)
                    record[j] += dataset[i][j];
            }
        }
        #pragma omp parallel for schedule(static, chunkm) if(m >= omp_get_num_threads())
        for(int p=0; p<m; p++) {
            if(elemCluster!=0)
                record[p] = record[p]/elemCluster;
            else
                record[p]=0;
            centroids[ki][p] = record[p];
        }
        elemCluster = 0;
    }
}

// Debug helper: print every centroid's attributes.
void printCentroids(float** centroids, int m)
{
    int p, i;
    for(i = 0; i < k; i++) {
        printf("\ncentroide cluster %d esimo: ",i);
        for(p=0;p<m;p++){
            printf("%.2f,",centroids[i][p]);
        }
        printf("\n");
    }
}

// Clear the whole membership matrix (no record belongs to any cluster).
void zeroClusters(int **clusters, int n){
    for(int i=0;i<n;i++){
        for(int j=0;j<k;j++){
            clusters[i][j] = 0;
        }
    }
}

// Assign each dataset record to its nearest centroid, which corresponds
// to its cluster (sets exactly one 1 per row of `clusters`).
void findClusters(float** dataset, int **clusters, float **centroids, int n, int m){
    int salvaK=0;
    float supCentroid[m], supDataset[m], dist=0, lowerDist;
    #pragma omp parallel for schedule(static, chunkn) private(lowerDist, dist, salvaK, supCentroid, supDataset)
    for(int i=0;i<n;i++){
        // lowerDist = m because data is normalized, so the max distance
        // between two records is the number of attributes
        lowerDist = m;
        for(int ki=0;ki<k;ki++){
            getRow(centroids, ki, supCentroid, m);   // a row of the centroid matrix
            getRow(dataset, i, supDataset, m);       // a row of the dataset matrix
            dist = eucliDist(supCentroid, supDataset, m);
            if(dist<=lowerDist) {
                lowerDist = dist;
                salvaK = ki;
            }
        }
        clusters[i][salvaK] = 1;
    }
}

// Copy row `row` of `matrix` (m columns) into `array`.
void getRow(float **matrix, int row, float *array, int m){
    for(int j = 0; j < m; j++){
        array[j] = matrix[row][j];
    }
}

// Generate k random record indexes in [0, n) for the initial centroids.
void randomIndexes(int *ranNum, int n){
    int i;
    for(i=0;i<k;i++){
        ranNum[i] = rand()%n;
    }
}

// Euclidean distance between two m-attribute records (continuous data).
float eucliDist(float *rec1, float *rec2, int m){
    float sum = 0.0;
    for(int i=0;i<m;i++){
        sum += ((rec1[i]-rec2[i]))*((rec1[i]-rec2[i]));
    }
    return sqrt(sum);
}

// Write the final record/cluster membership matrix to out/<FILEPATH>.txt.
void writeFile(float** data ,int **clusters, int n, int m)
{
    FILE* fd;
    char output[BUFF_SIZE + 10];
    // FIX: bounded path construction (was strcat into char[30])
    snprintf(output, sizeof output, "out/%s.txt", FILEPATH);
    fd = fopen(output, "w");
    if (fd == NULL) {   // FIX: fopen result was unchecked (NULL deref)
        fprintf(stderr, "cannot open %s\n", output);
        return;
    }
    for(int i = 0; i < n; i++) {
        fprintf(fd, "%d", i);
        for(int j = 0; j < k; j++) {
            fprintf(fd, "\t%d", clusters[i][j]);
        }
        fprintf(fd,"\n");
    }
    fclose(fd);   // FIX: the handle was leaked and output possibly unflushed
}

// Count the OpenMP threads actually available, by summing 1 per thread
// inside a parallel region.
int omp_thread_count()
{
    int n = 0;
    #pragma omp parallel reduction(+:n)
    n += 1;
    printf("\nNUM THREADS: %d\n", n);
    return n;
}
GB_unop__identity_fc64_int32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_fc64_int32) // op(A') function: GB (_unop_tran__identity_fc64_int32) // C type: GxB_FC64_t // A type: int32_t // cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0) // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_fc64_int32) ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const int32_t *Ax, const int8_t *restrict Ab, // A->b 
if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (int32_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int32_t aij = Ax [p] ; GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int32_t aij = Ax [p] ; GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_fc64_int32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
do_sum_omp_wbittrunc.c
typedef unsigned int uint;

/* Truncate the low-order mantissa bits of `sum` to `nbits`; defined elsewhere. */
double bittruncate(double sum, uint nbits);

/*
 * Sum the ncells values of var using an OpenMP parallel-for reduction,
 * then truncate the result's mantissa via bittruncate() so that the
 * answer is reproducible regardless of summation order.
 * NOTE(review): the original comment said "Serial sum", which was
 * inaccurate — the loop below is a parallel reduction.
 */
double do_sum_omp_wbittrunc(double* restrict var, long ncells, uint nbits)
{
   /* OpenMP parallel sum with a + reduction on sum */
   double sum = 0.0;
   #pragma omp parallel for reduction(+: sum)
   for (long i = 0; i < ncells; i++){
      sum += var[i];
   }

   sum = bittruncate(sum, nbits);
   return(sum);
}
sp-wrongSVE.c
typedef signed char __int8_t; typedef unsigned char __uint8_t; typedef short __int16_t; typedef unsigned short __uint16_t; typedef int __int32_t; typedef unsigned int __uint32_t; typedef long long __int64_t; typedef unsigned long long __uint64_t; typedef long __darwin_intptr_t; typedef unsigned int __darwin_natural_t; typedef int __darwin_ct_rune_t; union stUn_imopVarPre0 { char __mbstate8[128]; long long _mbstateL; } ; typedef union stUn_imopVarPre0 __mbstate_t; typedef __mbstate_t __darwin_mbstate_t; typedef long int __darwin_ptrdiff_t; typedef long unsigned int __darwin_size_t; typedef __builtin_va_list __darwin_va_list; typedef int __darwin_wchar_t; typedef __darwin_wchar_t __darwin_rune_t; typedef int __darwin_wint_t; typedef unsigned long __darwin_clock_t; typedef __uint32_t __darwin_socklen_t; typedef long __darwin_ssize_t; typedef long __darwin_time_t; typedef __int64_t __darwin_blkcnt_t; typedef __int32_t __darwin_blksize_t; typedef __int32_t __darwin_dev_t; typedef unsigned int __darwin_fsblkcnt_t; typedef unsigned int __darwin_fsfilcnt_t; typedef __uint32_t __darwin_gid_t; typedef __uint32_t __darwin_id_t; typedef __uint64_t __darwin_ino64_t; typedef __darwin_ino64_t __darwin_ino_t; typedef __darwin_natural_t __darwin_mach_port_name_t; typedef __darwin_mach_port_name_t __darwin_mach_port_t; typedef __uint16_t __darwin_mode_t; typedef __int64_t __darwin_off_t; typedef __int32_t __darwin_pid_t; typedef __uint32_t __darwin_sigset_t; typedef __int32_t __darwin_suseconds_t; typedef __uint32_t __darwin_uid_t; typedef __uint32_t __darwin_useconds_t; typedef unsigned char __darwin_uuid_t[16]; typedef char __darwin_uuid_string_t[37]; struct __darwin_pthread_handler_rec { void ( *__routine )(void *); void *__arg; struct __darwin_pthread_handler_rec *__next; } ; struct _opaque_pthread_attr_t { long __sig; char __opaque[56]; } ; struct _opaque_pthread_cond_t { long __sig; char __opaque[40]; } ; struct _opaque_pthread_condattr_t { long __sig; char __opaque[8]; } ; 
struct _opaque_pthread_mutex_t { long __sig; char __opaque[56]; } ; struct _opaque_pthread_mutexattr_t { long __sig; char __opaque[8]; } ; struct _opaque_pthread_once_t { long __sig; char __opaque[8]; } ; struct _opaque_pthread_rwlock_t { long __sig; char __opaque[192]; } ; struct _opaque_pthread_rwlockattr_t { long __sig; char __opaque[16]; } ; struct _opaque_pthread_t { long __sig; struct __darwin_pthread_handler_rec *__cleanup_stack; char __opaque[8176]; } ; typedef struct _opaque_pthread_attr_t __darwin_pthread_attr_t; typedef struct _opaque_pthread_cond_t __darwin_pthread_cond_t; typedef struct _opaque_pthread_condattr_t __darwin_pthread_condattr_t; typedef unsigned long __darwin_pthread_key_t; typedef struct _opaque_pthread_mutex_t __darwin_pthread_mutex_t; typedef struct _opaque_pthread_mutexattr_t __darwin_pthread_mutexattr_t; typedef struct _opaque_pthread_once_t __darwin_pthread_once_t; typedef struct _opaque_pthread_rwlock_t __darwin_pthread_rwlock_t; typedef struct _opaque_pthread_rwlockattr_t __darwin_pthread_rwlockattr_t; typedef struct _opaque_pthread_t *__darwin_pthread_t; typedef int __darwin_nl_item; typedef int __darwin_wctrans_t; typedef __uint32_t __darwin_wctype_t; typedef __darwin_va_list va_list; typedef __darwin_size_t size_t; typedef __darwin_off_t fpos_t; struct __sbuf { unsigned char *_base; int _size; } ; struct __sFILEX ; struct __sFILE { unsigned char *_p; int _r; int _w; short _flags; short _file; struct __sbuf _bf; int _lbfsize; void *_cookie; int ( *_close )(void *); int ( *_read )(void *, char * , int ); fpos_t ( *_seek )(void *, fpos_t , int ); int ( *_write )(void *, const char * , int ); struct __sbuf _ub; struct __sFILEX *_extra; int _ur; unsigned char _ubuf[3]; unsigned char _nbuf[1]; struct __sbuf _lb; int _blksize; fpos_t _offset; } ; typedef struct __sFILE FILE; int fclose(FILE *); int fgetc(FILE *); FILE *fopen(const char *restrict __filename, const char *restrict __mode); int fscanf(FILE *restrict , const char *restrict 
, ...); int printf(const char *restrict , ...); typedef __darwin_off_t off_t; typedef __darwin_ssize_t ssize_t; enum enum_imopVarPre1 { P_ALL, P_PID , P_PGID } ; typedef enum enum_imopVarPre1 idtype_t; typedef __darwin_pid_t pid_t; typedef __darwin_id_t id_t; typedef int sig_atomic_t; struct __darwin_i386_thread_state { unsigned int __eax; unsigned int __ebx; unsigned int __ecx; unsigned int __edx; unsigned int __edi; unsigned int __esi; unsigned int __ebp; unsigned int __esp; unsigned int __ss; unsigned int __eflags; unsigned int __eip; unsigned int __cs; unsigned int __ds; unsigned int __es; unsigned int __fs; unsigned int __gs; } ; struct __darwin_fp_control { unsigned short __invalid: 1, __denorm: 1 , __zdiv: 1 , __ovrfl: 1 , __undfl: 1 , __precis: 1 , :2 , __pc: 2 , __rc: 2 , :1 , :3; } ; typedef struct __darwin_fp_control __darwin_fp_control_t; struct __darwin_fp_status { unsigned short __invalid: 1, __denorm: 1 , __zdiv: 1 , __ovrfl: 1 , __undfl: 1 , __precis: 1 , __stkflt: 1 , __errsumm: 1 , __c0: 1 , __c1: 1 , __c2: 1 , __tos: 3 , __c3: 1 , __busy: 1; } ; typedef struct __darwin_fp_status __darwin_fp_status_t; struct __darwin_mmst_reg { char __mmst_reg[10]; char __mmst_rsrv[6]; } ; struct __darwin_xmm_reg { char __xmm_reg[16]; } ; struct __darwin_i386_float_state { int __fpu_reserved[2]; struct __darwin_fp_control __fpu_fcw; struct __darwin_fp_status __fpu_fsw; __uint8_t __fpu_ftw; __uint8_t __fpu_rsrv1; __uint16_t __fpu_fop; __uint32_t __fpu_ip; __uint16_t __fpu_cs; __uint16_t __fpu_rsrv2; __uint32_t __fpu_dp; __uint16_t __fpu_ds; __uint16_t __fpu_rsrv3; __uint32_t __fpu_mxcsr; __uint32_t __fpu_mxcsrmask; struct __darwin_mmst_reg __fpu_stmm0; struct __darwin_mmst_reg __fpu_stmm1; struct __darwin_mmst_reg __fpu_stmm2; struct __darwin_mmst_reg __fpu_stmm3; struct __darwin_mmst_reg __fpu_stmm4; struct __darwin_mmst_reg __fpu_stmm5; struct __darwin_mmst_reg __fpu_stmm6; struct __darwin_mmst_reg __fpu_stmm7; struct __darwin_xmm_reg __fpu_xmm0; struct 
__darwin_xmm_reg __fpu_xmm1; struct __darwin_xmm_reg __fpu_xmm2; struct __darwin_xmm_reg __fpu_xmm3; struct __darwin_xmm_reg __fpu_xmm4; struct __darwin_xmm_reg __fpu_xmm5; struct __darwin_xmm_reg __fpu_xmm6; struct __darwin_xmm_reg __fpu_xmm7; char __fpu_rsrv4[14 * 16]; int __fpu_reserved1; } ; struct __darwin_i386_avx_state { int __fpu_reserved[2]; struct __darwin_fp_control __fpu_fcw; struct __darwin_fp_status __fpu_fsw; __uint8_t __fpu_ftw; __uint8_t __fpu_rsrv1; __uint16_t __fpu_fop; __uint32_t __fpu_ip; __uint16_t __fpu_cs; __uint16_t __fpu_rsrv2; __uint32_t __fpu_dp; __uint16_t __fpu_ds; __uint16_t __fpu_rsrv3; __uint32_t __fpu_mxcsr; __uint32_t __fpu_mxcsrmask; struct __darwin_mmst_reg __fpu_stmm0; struct __darwin_mmst_reg __fpu_stmm1; struct __darwin_mmst_reg __fpu_stmm2; struct __darwin_mmst_reg __fpu_stmm3; struct __darwin_mmst_reg __fpu_stmm4; struct __darwin_mmst_reg __fpu_stmm5; struct __darwin_mmst_reg __fpu_stmm6; struct __darwin_mmst_reg __fpu_stmm7; struct __darwin_xmm_reg __fpu_xmm0; struct __darwin_xmm_reg __fpu_xmm1; struct __darwin_xmm_reg __fpu_xmm2; struct __darwin_xmm_reg __fpu_xmm3; struct __darwin_xmm_reg __fpu_xmm4; struct __darwin_xmm_reg __fpu_xmm5; struct __darwin_xmm_reg __fpu_xmm6; struct __darwin_xmm_reg __fpu_xmm7; char __fpu_rsrv4[14 * 16]; int __fpu_reserved1; char __avx_reserved1[64]; struct __darwin_xmm_reg __fpu_ymmh0; struct __darwin_xmm_reg __fpu_ymmh1; struct __darwin_xmm_reg __fpu_ymmh2; struct __darwin_xmm_reg __fpu_ymmh3; struct __darwin_xmm_reg __fpu_ymmh4; struct __darwin_xmm_reg __fpu_ymmh5; struct __darwin_xmm_reg __fpu_ymmh6; struct __darwin_xmm_reg __fpu_ymmh7; } ; struct __darwin_i386_exception_state { __uint16_t __trapno; __uint16_t __cpu; __uint32_t __err; __uint32_t __faultvaddr; } ; struct __darwin_x86_debug_state32 { unsigned int __dr0; unsigned int __dr1; unsigned int __dr2; unsigned int __dr3; unsigned int __dr4; unsigned int __dr5; unsigned int __dr6; unsigned int __dr7; } ; struct 
__darwin_x86_thread_state64 { __uint64_t __rax; __uint64_t __rbx; __uint64_t __rcx; __uint64_t __rdx; __uint64_t __rdi; __uint64_t __rsi; __uint64_t __rbp; __uint64_t __rsp; __uint64_t __r8; __uint64_t __r9; __uint64_t __r10; __uint64_t __r11; __uint64_t __r12; __uint64_t __r13; __uint64_t __r14; __uint64_t __r15; __uint64_t __rip; __uint64_t __rflags; __uint64_t __cs; __uint64_t __fs; __uint64_t __gs; } ; struct __darwin_x86_float_state64 { int __fpu_reserved[2]; struct __darwin_fp_control __fpu_fcw; struct __darwin_fp_status __fpu_fsw; __uint8_t __fpu_ftw; __uint8_t __fpu_rsrv1; __uint16_t __fpu_fop; __uint32_t __fpu_ip; __uint16_t __fpu_cs; __uint16_t __fpu_rsrv2; __uint32_t __fpu_dp; __uint16_t __fpu_ds; __uint16_t __fpu_rsrv3; __uint32_t __fpu_mxcsr; __uint32_t __fpu_mxcsrmask; struct __darwin_mmst_reg __fpu_stmm0; struct __darwin_mmst_reg __fpu_stmm1; struct __darwin_mmst_reg __fpu_stmm2; struct __darwin_mmst_reg __fpu_stmm3; struct __darwin_mmst_reg __fpu_stmm4; struct __darwin_mmst_reg __fpu_stmm5; struct __darwin_mmst_reg __fpu_stmm6; struct __darwin_mmst_reg __fpu_stmm7; struct __darwin_xmm_reg __fpu_xmm0; struct __darwin_xmm_reg __fpu_xmm1; struct __darwin_xmm_reg __fpu_xmm2; struct __darwin_xmm_reg __fpu_xmm3; struct __darwin_xmm_reg __fpu_xmm4; struct __darwin_xmm_reg __fpu_xmm5; struct __darwin_xmm_reg __fpu_xmm6; struct __darwin_xmm_reg __fpu_xmm7; struct __darwin_xmm_reg __fpu_xmm8; struct __darwin_xmm_reg __fpu_xmm9; struct __darwin_xmm_reg __fpu_xmm10; struct __darwin_xmm_reg __fpu_xmm11; struct __darwin_xmm_reg __fpu_xmm12; struct __darwin_xmm_reg __fpu_xmm13; struct __darwin_xmm_reg __fpu_xmm14; struct __darwin_xmm_reg __fpu_xmm15; char __fpu_rsrv4[6 * 16]; int __fpu_reserved1; } ; struct __darwin_x86_avx_state64 { int __fpu_reserved[2]; struct __darwin_fp_control __fpu_fcw; struct __darwin_fp_status __fpu_fsw; __uint8_t __fpu_ftw; __uint8_t __fpu_rsrv1; __uint16_t __fpu_fop; __uint32_t __fpu_ip; __uint16_t __fpu_cs; __uint16_t __fpu_rsrv2; 
__uint32_t __fpu_dp; __uint16_t __fpu_ds; __uint16_t __fpu_rsrv3; __uint32_t __fpu_mxcsr; __uint32_t __fpu_mxcsrmask; struct __darwin_mmst_reg __fpu_stmm0; struct __darwin_mmst_reg __fpu_stmm1; struct __darwin_mmst_reg __fpu_stmm2; struct __darwin_mmst_reg __fpu_stmm3; struct __darwin_mmst_reg __fpu_stmm4; struct __darwin_mmst_reg __fpu_stmm5; struct __darwin_mmst_reg __fpu_stmm6; struct __darwin_mmst_reg __fpu_stmm7; struct __darwin_xmm_reg __fpu_xmm0; struct __darwin_xmm_reg __fpu_xmm1; struct __darwin_xmm_reg __fpu_xmm2; struct __darwin_xmm_reg __fpu_xmm3; struct __darwin_xmm_reg __fpu_xmm4; struct __darwin_xmm_reg __fpu_xmm5; struct __darwin_xmm_reg __fpu_xmm6; struct __darwin_xmm_reg __fpu_xmm7; struct __darwin_xmm_reg __fpu_xmm8; struct __darwin_xmm_reg __fpu_xmm9; struct __darwin_xmm_reg __fpu_xmm10; struct __darwin_xmm_reg __fpu_xmm11; struct __darwin_xmm_reg __fpu_xmm12; struct __darwin_xmm_reg __fpu_xmm13; struct __darwin_xmm_reg __fpu_xmm14; struct __darwin_xmm_reg __fpu_xmm15; char __fpu_rsrv4[6 * 16]; int __fpu_reserved1; char __avx_reserved1[64]; struct __darwin_xmm_reg __fpu_ymmh0; struct __darwin_xmm_reg __fpu_ymmh1; struct __darwin_xmm_reg __fpu_ymmh2; struct __darwin_xmm_reg __fpu_ymmh3; struct __darwin_xmm_reg __fpu_ymmh4; struct __darwin_xmm_reg __fpu_ymmh5; struct __darwin_xmm_reg __fpu_ymmh6; struct __darwin_xmm_reg __fpu_ymmh7; struct __darwin_xmm_reg __fpu_ymmh8; struct __darwin_xmm_reg __fpu_ymmh9; struct __darwin_xmm_reg __fpu_ymmh10; struct __darwin_xmm_reg __fpu_ymmh11; struct __darwin_xmm_reg __fpu_ymmh12; struct __darwin_xmm_reg __fpu_ymmh13; struct __darwin_xmm_reg __fpu_ymmh14; struct __darwin_xmm_reg __fpu_ymmh15; } ; struct __darwin_x86_exception_state64 { __uint16_t __trapno; __uint16_t __cpu; __uint32_t __err; __uint64_t __faultvaddr; } ; struct __darwin_x86_debug_state64 { __uint64_t __dr0; __uint64_t __dr1; __uint64_t __dr2; __uint64_t __dr3; __uint64_t __dr4; __uint64_t __dr5; __uint64_t __dr6; __uint64_t __dr7; } ; struct 
__darwin_mcontext32 { struct __darwin_i386_exception_state __es; struct __darwin_i386_thread_state __ss; struct __darwin_i386_float_state __fs; } ; struct __darwin_mcontext_avx32 { struct __darwin_i386_exception_state __es; struct __darwin_i386_thread_state __ss; struct __darwin_i386_avx_state __fs; } ; struct __darwin_mcontext64 { struct __darwin_x86_exception_state64 __es; struct __darwin_x86_thread_state64 __ss; struct __darwin_x86_float_state64 __fs; } ; struct __darwin_mcontext_avx64 { struct __darwin_x86_exception_state64 __es; struct __darwin_x86_thread_state64 __ss; struct __darwin_x86_avx_state64 __fs; } ; typedef struct __darwin_mcontext64 *mcontext_t; typedef __darwin_pthread_attr_t pthread_attr_t; struct __darwin_sigaltstack { void *ss_sp; __darwin_size_t ss_size; int ss_flags; } ; typedef struct __darwin_sigaltstack stack_t; struct __darwin_ucontext { int uc_onstack; __darwin_sigset_t uc_sigmask; struct __darwin_sigaltstack uc_stack; struct __darwin_ucontext *uc_link; __darwin_size_t uc_mcsize; struct __darwin_mcontext64 *uc_mcontext; } ; typedef struct __darwin_ucontext ucontext_t; typedef __darwin_sigset_t sigset_t; typedef __darwin_uid_t uid_t; union sigval { int sival_int; void *sival_ptr; } ; struct sigevent { int sigev_notify; int sigev_signo; union sigval sigev_value; void ( *sigev_notify_function )(union sigval ); pthread_attr_t *sigev_notify_attributes; } ; struct __siginfo { int si_signo; int si_errno; int si_code; pid_t si_pid; uid_t si_uid; int si_status; void *si_addr; union sigval si_value; long si_band; unsigned long __pad[7]; } ; typedef struct __siginfo siginfo_t; union __sigaction_u { void ( *__sa_handler )(int ); void ( *__sa_sigaction )(int , struct __siginfo * , void *); } ; struct __sigaction { union __sigaction_u __sigaction_u; void ( *sa_tramp )(void *, int , int , siginfo_t * , void *); sigset_t sa_mask; int sa_flags; } ; struct sigaction { union __sigaction_u __sigaction_u; sigset_t sa_mask; int sa_flags; } ; typedef void ( 
*sig_t )(int ); struct sigvec { void ( *sv_handler )(int ); int sv_mask; int sv_flags; } ; struct sigstack { char *ss_sp; int ss_onstack; } ; typedef signed char int8_t; typedef short int16_t; typedef int int32_t; typedef long long int64_t; typedef unsigned char uint8_t; typedef unsigned short uint16_t; typedef unsigned int uint32_t; typedef unsigned long long uint64_t; typedef int8_t int_least8_t; typedef int16_t int_least16_t; typedef int32_t int_least32_t; typedef int64_t int_least64_t; typedef uint8_t uint_least8_t; typedef uint16_t uint_least16_t; typedef uint32_t uint_least32_t; typedef uint64_t uint_least64_t; typedef int8_t int_fast8_t; typedef int16_t int_fast16_t; typedef int32_t int_fast32_t; typedef int64_t int_fast64_t; typedef uint8_t uint_fast8_t; typedef uint16_t uint_fast16_t; typedef uint32_t uint_fast32_t; typedef uint64_t uint_fast64_t; typedef __darwin_intptr_t intptr_t; typedef unsigned long uintptr_t; typedef long int intmax_t; typedef long unsigned int uintmax_t; struct timeval { __darwin_time_t tv_sec; __darwin_suseconds_t tv_usec; } ; typedef __uint64_t rlim_t; struct rusage { struct timeval ru_utime; struct timeval ru_stime; long ru_maxrss; long ru_ixrss; long ru_idrss; long ru_isrss; long ru_minflt; long ru_majflt; long ru_nswap; long ru_inblock; long ru_oublock; long ru_msgsnd; long ru_msgrcv; long ru_nsignals; long ru_nvcsw; long ru_nivcsw; } ; typedef void *rusage_info_t; struct rusage_info_v0 { uint8_t ri_uuid[16]; uint64_t ri_user_time; uint64_t ri_system_time; uint64_t ri_pkg_idle_wkups; uint64_t ri_interrupt_wkups; uint64_t ri_pageins; uint64_t ri_wired_size; uint64_t ri_resident_size; uint64_t ri_phys_footprint; uint64_t ri_proc_start_abstime; uint64_t ri_proc_exit_abstime; } ; struct rusage_info_v1 { uint8_t ri_uuid[16]; uint64_t ri_user_time; uint64_t ri_system_time; uint64_t ri_pkg_idle_wkups; uint64_t ri_interrupt_wkups; uint64_t ri_pageins; uint64_t ri_wired_size; uint64_t ri_resident_size; uint64_t ri_phys_footprint; 
uint64_t ri_proc_start_abstime; uint64_t ri_proc_exit_abstime; uint64_t ri_child_user_time; uint64_t ri_child_system_time; uint64_t ri_child_pkg_idle_wkups; uint64_t ri_child_interrupt_wkups; uint64_t ri_child_pageins; uint64_t ri_child_elapsed_abstime; } ; struct rusage_info_v2 { uint8_t ri_uuid[16]; uint64_t ri_user_time; uint64_t ri_system_time; uint64_t ri_pkg_idle_wkups; uint64_t ri_interrupt_wkups; uint64_t ri_pageins; uint64_t ri_wired_size; uint64_t ri_resident_size; uint64_t ri_phys_footprint; uint64_t ri_proc_start_abstime; uint64_t ri_proc_exit_abstime; uint64_t ri_child_user_time; uint64_t ri_child_system_time; uint64_t ri_child_pkg_idle_wkups; uint64_t ri_child_interrupt_wkups; uint64_t ri_child_pageins; uint64_t ri_child_elapsed_abstime; uint64_t ri_diskio_bytesread; uint64_t ri_diskio_byteswritten; } ; struct rusage_info_v3 { uint8_t ri_uuid[16]; uint64_t ri_user_time; uint64_t ri_system_time; uint64_t ri_pkg_idle_wkups; uint64_t ri_interrupt_wkups; uint64_t ri_pageins; uint64_t ri_wired_size; uint64_t ri_resident_size; uint64_t ri_phys_footprint; uint64_t ri_proc_start_abstime; uint64_t ri_proc_exit_abstime; uint64_t ri_child_user_time; uint64_t ri_child_system_time; uint64_t ri_child_pkg_idle_wkups; uint64_t ri_child_interrupt_wkups; uint64_t ri_child_pageins; uint64_t ri_child_elapsed_abstime; uint64_t ri_diskio_bytesread; uint64_t ri_diskio_byteswritten; uint64_t ri_cpu_time_qos_default; uint64_t ri_cpu_time_qos_maintenance; uint64_t ri_cpu_time_qos_background; uint64_t ri_cpu_time_qos_utility; uint64_t ri_cpu_time_qos_legacy; uint64_t ri_cpu_time_qos_user_initiated; uint64_t ri_cpu_time_qos_user_interactive; uint64_t ri_billed_system_time; uint64_t ri_serviced_system_time; } ; typedef struct rusage_info_v3 rusage_info_current; struct rlimit { rlim_t rlim_cur; rlim_t rlim_max; } ; struct proc_rlimit_control_wakeupmon { uint32_t wm_flags; int32_t wm_rate; } ; union wait { int w_status; struct stUn_imopVarPre2 { unsigned int w_Termsig: 7, 
w_Coredump: 1 , w_Retcode: 8 , w_Filler: 16; } w_T; struct stUn_imopVarPre3 { unsigned int w_Stopval: 8, w_Stopsig: 8 , w_Filler: 16; } w_S; } ; typedef __darwin_ct_rune_t ct_rune_t; typedef __darwin_rune_t rune_t; typedef __darwin_wchar_t wchar_t; struct stUn_imopVarPre4 { int quot; int rem; } ; typedef struct stUn_imopVarPre4 div_t; struct stUn_imopVarPre5 { long quot; long rem; } ; typedef struct stUn_imopVarPre5 ldiv_t; struct stUn_imopVarPre6 { long long quot; long long rem; } ; typedef struct stUn_imopVarPre6 lldiv_t; void exit(int ); typedef unsigned char u_int8_t; typedef unsigned short u_int16_t; typedef unsigned int u_int32_t; typedef unsigned long long u_int64_t; typedef int64_t register_t; typedef u_int64_t user_addr_t; typedef u_int64_t user_size_t; typedef int64_t user_ssize_t; typedef int64_t user_long_t; typedef u_int64_t user_ulong_t; typedef int64_t user_time_t; typedef int64_t user_off_t; typedef u_int64_t syscall_arg_t; typedef __darwin_dev_t dev_t; typedef __darwin_mode_t mode_t; typedef float float_t; typedef double double_t; extern double fabs(double ); extern double pow(double , double ); extern double sqrt(double ); struct __float2 { float __sinval; float __cosval; } ; struct __double2 { double __sinval; double __cosval; } ; struct exception { int type; char *name; double arg1; double arg2; double retval; } ; typedef int boolean; struct stUn_imopVarPre11 { double real; double imag; } ; typedef struct stUn_imopVarPre11 dcomplex; extern void timer_clear(int ); extern void timer_start(int ); extern void timer_stop(int ); extern double timer_read(int ); extern void c_print_results(char *name, char class , int n1 , int n2 , int n3 , int niter , int nthreads , double t , double mops , char *optype , int passed_verification , char *npbversion , char *compiletime , char *cc , char *clink , char *c_lib , char *c_inc , char *cflags , char *clinkflags , char *rand); static int grid_points[3]; static double tx1; static double tx2; static double tx3; 
static double ty1; static double ty2; static double ty3; static double tz1; static double tz2; static double tz3; static double dx1; static double dx2; static double dx3; static double dx4; static double dx5; static double dy1; static double dy2; static double dy3; static double dy4; static double dy5; static double dz1; static double dz2; static double dz3; static double dz4; static double dz5; static double dssp; static double dt; static double ce[13][5]; static double dxmax; static double dymax; static double dzmax; static double xxcon1; static double xxcon2; static double xxcon3; static double xxcon4; static double xxcon5; static double dx1tx1; static double dx2tx1; static double dx3tx1; static double dx4tx1; static double dx5tx1; static double yycon1; static double yycon2; static double yycon3; static double yycon4; static double yycon5; static double dy1ty1; static double dy2ty1; static double dy3ty1; static double dy4ty1; static double dy5ty1; static double zzcon1; static double zzcon2; static double zzcon3; static double zzcon4; static double zzcon5; static double dz1tz1; static double dz2tz1; static double dz3tz1; static double dz4tz1; static double dz5tz1; static double dnxm1; static double dnym1; static double dnzm1; static double c1c2; static double c1c5; static double c3c4; static double c1345; static double conz1; static double c1; static double c2; static double c3; static double c4; static double c5; static double c4dssp; static double c5dssp; static double dtdssp; static double dttx1; static double bt; static double dttx2; static double dtty1; static double dtty2; static double dttz1; static double dttz2; static double c2dttx1; static double c2dtty1; static double c2dttz1; static double comz1; static double comz4; static double comz5; static double comz6; static double c3c4tx3; static double c3c4ty3; static double c3c4tz3; static double c2iv; static double con43; static double con16; static double u[5][12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 
1]; static double us[12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1]; static double vs[12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1]; static double ws[12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1]; static double qs[12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1]; static double ainv[12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1]; static double rho_i[12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1]; static double speed[12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1]; static double square[12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1]; static double rhs[5][12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1]; static double forcing[5][12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1]; static double lhs[15][12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1]; static double cv[12]; static double rhon[12]; static double rhos[12]; static double rhoq[12]; static double cuf[12]; static double q[12]; static double ue[5][12]; static double buf[5][12]; static void add(void ); static void adi(void ); static void error_norm(double rms[5]); static void rhs_norm(double rms[5]); static void exact_rhs(void ); static void exact_solution(double xi, double eta , double zeta , double dtemp[5]); static void initialize(void ); static void lhsinit(void ); static void lhsx(void ); static void lhsy(void ); static void lhsz(void ); static void ninvr(void ); static void pinvr(void ); static void compute_rhs(void ); static void set_constants(void ); static void txinvr(void ); static void tzetar(void ); static void verify(int no_time_steps, char *class , boolean *verified); static void x_solve(void ); static void y_solve(void ); static void z_solve(void ); int main(int argc, char **argv) { int niter; int step; double mflops; double tmax; int nthreads = 1; boolean verified; char class; FILE *fp; printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version" " - SP Benchmark\n\n"); fp = fopen("inputsp.data", "r"); if (fp != ((void *) 0)) { printf(" Reading from input file inputsp.data\n"); int 
*_imopVarPre145; _imopVarPre145 = &niter; fscanf(fp, "%d", _imopVarPre145); int _imopVarPre147; _imopVarPre147 = fgetc(fp); while (_imopVarPre147 != '\n') { ; _imopVarPre147 = fgetc(fp); } double *_imopVarPre149; _imopVarPre149 = &dt; fscanf(fp, "%lf", _imopVarPre149); int _imopVarPre151; _imopVarPre151 = fgetc(fp); while (_imopVarPre151 != '\n') { ; _imopVarPre151 = fgetc(fp); } int *_imopVarPre155; int *_imopVarPre156; int *_imopVarPre157; _imopVarPre155 = &grid_points[2]; _imopVarPre156 = &grid_points[1]; _imopVarPre157 = &grid_points[0]; fscanf(fp, "%d%d%d", _imopVarPre157, _imopVarPre156, _imopVarPre155); fclose(fp); } else { printf(" No input file inputsp.data. Using compiled defaults"); niter = 100; dt = 0.015; grid_points[0] = 12; grid_points[1] = 12; grid_points[2] = 12; } int _imopVarPre161; int _imopVarPre162; int _imopVarPre163; _imopVarPre161 = grid_points[2]; _imopVarPre162 = grid_points[1]; _imopVarPre163 = grid_points[0]; printf(" Size: %3dx%3dx%3d\n", _imopVarPre163, _imopVarPre162, _imopVarPre161); printf(" Iterations: %3d dt: %10.6f\n", niter, dt); int _imopVarPre164; int _imopVarPre165; _imopVarPre164 = (grid_points[0] > 12); if (!_imopVarPre164) { _imopVarPre165 = (grid_points[1] > 12); if (!_imopVarPre165) { _imopVarPre165 = (grid_points[2] > 12); } _imopVarPre164 = _imopVarPre165; } if (_imopVarPre164) { int _imopVarPre169; int _imopVarPre170; int _imopVarPre171; _imopVarPre169 = grid_points[2]; _imopVarPre170 = grid_points[1]; _imopVarPre171 = grid_points[0]; printf("%d, %d, %d\n", _imopVarPre171, _imopVarPre170, _imopVarPre169); printf(" Problem size too big for compiled array sizes\n"); exit(1); } set_constants(); initialize(); lhsinit(); exact_rhs(); adi(); initialize(); timer_clear(1); timer_start(1); for (step = 1; step <= niter; step++) { int _imopVarPre172; _imopVarPre172 = step % 20 == 0; if (!_imopVarPre172) { _imopVarPre172 = step == 1; } if (_imopVarPre172) { printf(" Time step %4d\n", step); } adi(); } #pragma omp parallel { } 
timer_stop(1); tmax = timer_read(1); int *_imopVarPre175; char *_imopVarPre176; _imopVarPre175 = &verified; _imopVarPre176 = &class; verify(niter, _imopVarPre176, _imopVarPre175); if (tmax != 0) { double _imopVarPre183; double _imopVarPre184; _imopVarPre183 = (double) 12; _imopVarPre184 = pow(_imopVarPre183, 3.0); mflops = (881.174 * _imopVarPre184 - 4683.91 * (((double) 12) * ((double) 12)) + 11484.5 * (double) 12 - 19272.4) * (double) niter / (tmax * 1000000.0); } else { mflops = 0.0; } int _imopVarPre188; int _imopVarPre189; int _imopVarPre190; _imopVarPre188 = grid_points[2]; _imopVarPre189 = grid_points[1]; _imopVarPre190 = grid_points[0]; c_print_results("SP", class, _imopVarPre190, _imopVarPre189, _imopVarPre188, niter, nthreads, tmax, mflops, " floating point", verified, "3.0 structured", "21 Jul 2017", "gcc", "gcc", "(none)", "-I../common", "-O3 -fopenmp", "-O3 -fopenmp", "(none)"); } static void add(void ) { int i; int j; int k; int m; #pragma omp for nowait for (m = 0; m < 5; m++) { for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { u[m][i][j][k] = u[m][i][j][k] + rhs[m][i][j][k]; } } } } } static void adi(void ) { compute_rhs(); txinvr(); x_solve(); y_solve(); z_solve(); add(); } static void error_norm(double rms[5]) { int i; int j; int k; int m; int d; double xi; double eta; double zeta; double u_exact[5]; double add; for (m = 0; m < 5; m++) { rms[m] = 0.0; } for (i = 0; i <= grid_points[0] - 1; i++) { xi = (double) i * dnxm1; for (j = 0; j <= grid_points[1] - 1; j++) { eta = (double) j * dnym1; for (k = 0; k <= grid_points[2] - 1; k++) { zeta = (double) k * dnzm1; exact_solution(xi, eta, zeta, u_exact); for (m = 0; m < 5; m++) { add = u[m][i][j][k] - u_exact[m]; rms[m] = rms[m] + add * add; } } } } for (m = 0; m < 5; m++) { for (d = 0; d < 3; d++) { rms[m] = rms[m] / (double) (grid_points[d] - 2); } double _imopVarPre192; double _imopVarPre193; _imopVarPre192 = rms[m]; 
_imopVarPre193 = sqrt(_imopVarPre192); rms[m] = _imopVarPre193; } } static void rhs_norm(double rms[5]) { int i; int j; int k; int d; int m; double add; for (m = 0; m < 5; m++) { rms[m] = 0.0; } for (i = 0; i <= grid_points[0] - 2; i++) { for (j = 0; j <= grid_points[1] - 2; j++) { for (k = 0; k <= grid_points[2] - 2; k++) { for (m = 0; m < 5; m++) { add = rhs[m][i][j][k]; rms[m] = rms[m] + add * add; } } } } for (m = 0; m < 5; m++) { for (d = 0; d < 3; d++) { rms[m] = rms[m] / (double) (grid_points[d] - 2); } double _imopVarPre195; double _imopVarPre196; _imopVarPre195 = rms[m]; _imopVarPre196 = sqrt(_imopVarPre195); rms[m] = _imopVarPre196; } } static void exact_rhs(void ) { double dtemp[5]; double xi; double eta; double zeta; double dtpp; int m; int i; int j; int k; int ip1; int im1; int jp1; int jm1; int km1; int kp1; for (m = 0; m < 5; m++) { for (i = 0; i <= grid_points[0] - 1; i++) { for (j = 0; j <= grid_points[1] - 1; j++) { for (k = 0; k <= grid_points[2] - 1; k++) { forcing[m][i][j][k] = 0.0; } } } } for (k = 1; k <= grid_points[2] - 2; k++) { zeta = (double) k * dnzm1; for (j = 1; j <= grid_points[1] - 2; j++) { eta = (double) j * dnym1; for (i = 0; i <= grid_points[0] - 1; i++) { xi = (double) i * dnxm1; exact_solution(xi, eta, zeta, dtemp); for (m = 0; m < 5; m++) { ue[m][i] = dtemp[m]; } dtpp = 1.0 / dtemp[0]; for (m = 1; m < 5; m++) { buf[m][i] = dtpp * dtemp[m]; } cuf[i] = buf[1][i] * buf[1][i]; buf[0][i] = cuf[i] + buf[2][i] * buf[2][i] + buf[3][i] * buf[3][i]; q[i] = 0.5 * (buf[1][i] * ue[1][i] + buf[2][i] * ue[2][i] + buf[3][i] * ue[3][i]); } for (i = 1; i <= grid_points[0] - 2; i++) { im1 = i - 1; ip1 = i + 1; forcing[0][i][j][k] = forcing[0][i][j][k] - tx2 * (ue[1][ip1] - ue[1][im1]) + dx1tx1 * (ue[0][ip1] - 2.0 * ue[0][i] + ue[0][im1]); forcing[1][i][j][k] = forcing[1][i][j][k] - tx2 * ((ue[1][ip1] * buf[1][ip1] + c2 * (ue[4][ip1] - q[ip1])) - (ue[1][im1] * buf[1][im1] + c2 * (ue[4][im1] - q[im1]))) + xxcon1 * (buf[1][ip1] - 2.0 * buf[1][i] + 
buf[1][im1]) + dx2tx1 * (ue[1][ip1] - 2.0 * ue[1][i] + ue[1][im1]); forcing[2][i][j][k] = forcing[2][i][j][k] - tx2 * (ue[2][ip1] * buf[1][ip1] - ue[2][im1] * buf[1][im1]) + xxcon2 * (buf[2][ip1] - 2.0 * buf[2][i] + buf[2][im1]) + dx3tx1 * (ue[2][ip1] - 2.0 * ue[2][i] + ue[2][im1]); forcing[3][i][j][k] = forcing[3][i][j][k] - tx2 * (ue[3][ip1] * buf[1][ip1] - ue[3][im1] * buf[1][im1]) + xxcon2 * (buf[3][ip1] - 2.0 * buf[3][i] + buf[3][im1]) + dx4tx1 * (ue[3][ip1] - 2.0 * ue[3][i] + ue[3][im1]); forcing[4][i][j][k] = forcing[4][i][j][k] - tx2 * (buf[1][ip1] * (c1 * ue[4][ip1] - c2 * q[ip1]) - buf[1][im1] * (c1 * ue[4][im1] - c2 * q[im1])) + 0.5 * xxcon3 * (buf[0][ip1] - 2.0 * buf[0][i] + buf[0][im1]) + xxcon4 * (cuf[ip1] - 2.0 * cuf[i] + cuf[im1]) + xxcon5 * (buf[4][ip1] - 2.0 * buf[4][i] + buf[4][im1]) + dx5tx1 * (ue[4][ip1] - 2.0 * ue[4][i] + ue[4][im1]); } for (m = 0; m < 5; m++) { i = 1; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (5.0 * ue[m][i] - 4.0 * ue[m][i + 1] + ue[m][i + 2]); i = 2; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (-4.0 * ue[m][i - 1] + 6.0 * ue[m][i] - 4.0 * ue[m][i + 1] + ue[m][i + 2]); } for (m = 0; m < 5; m++) { for (i = 3; i <= grid_points[0] - 4; i++) { forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][i - 2] - 4.0 * ue[m][i - 1] + 6.0 * ue[m][i] - 4.0 * ue[m][i + 1] + ue[m][i + 2]); } } for (m = 0; m < 5; m++) { i = grid_points[0] - 3; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][i - 2] - 4.0 * ue[m][i - 1] + 6.0 * ue[m][i] - 4.0 * ue[m][i + 1]); i = grid_points[0] - 2; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][i - 2] - 4.0 * ue[m][i - 1] + 5.0 * ue[m][i]); } } } for (k = 1; k <= grid_points[2] - 2; k++) { zeta = (double) k * dnzm1; for (i = 1; i <= grid_points[0] - 2; i++) { xi = (double) i * dnxm1; for (j = 0; j <= grid_points[1] - 1; j++) { eta = (double) j * dnym1; exact_solution(xi, eta, zeta, dtemp); for (m = 0; m < 5; m++) { ue[m][j] = dtemp[m]; } dtpp = 1.0 / dtemp[0]; for (m = 
1; m < 5; m++) { buf[m][j] = dtpp * dtemp[m]; } cuf[j] = buf[2][j] * buf[2][j]; buf[0][j] = cuf[j] + buf[1][j] * buf[1][j] + buf[3][j] * buf[3][j]; q[j] = 0.5 * (buf[1][j] * ue[1][j] + buf[2][j] * ue[2][j] + buf[3][j] * ue[3][j]); } for (j = 1; j <= grid_points[1] - 2; j++) { jm1 = j - 1; jp1 = j + 1; forcing[0][i][j][k] = forcing[0][i][j][k] - ty2 * (ue[2][jp1] - ue[2][jm1]) + dy1ty1 * (ue[0][jp1] - 2.0 * ue[0][j] + ue[0][jm1]); forcing[1][i][j][k] = forcing[1][i][j][k] - ty2 * (ue[1][jp1] * buf[2][jp1] - ue[1][jm1] * buf[2][jm1]) + yycon2 * (buf[1][jp1] - 2.0 * buf[1][j] + buf[1][jm1]) + dy2ty1 * (ue[1][jp1] - 2.0 * ue[1][j] + ue[1][jm1]); forcing[2][i][j][k] = forcing[2][i][j][k] - ty2 * ((ue[2][jp1] * buf[2][jp1] + c2 * (ue[4][jp1] - q[jp1])) - (ue[2][jm1] * buf[2][jm1] + c2 * (ue[4][jm1] - q[jm1]))) + yycon1 * (buf[2][jp1] - 2.0 * buf[2][j] + buf[2][jm1]) + dy3ty1 * (ue[2][jp1] - 2.0 * ue[2][j] + ue[2][jm1]); forcing[3][i][j][k] = forcing[3][i][j][k] - ty2 * (ue[3][jp1] * buf[2][jp1] - ue[3][jm1] * buf[2][jm1]) + yycon2 * (buf[3][jp1] - 2.0 * buf[3][j] + buf[3][jm1]) + dy4ty1 * (ue[3][jp1] - 2.0 * ue[3][j] + ue[3][jm1]); forcing[4][i][j][k] = forcing[4][i][j][k] - ty2 * (buf[2][jp1] * (c1 * ue[4][jp1] - c2 * q[jp1]) - buf[2][jm1] * (c1 * ue[4][jm1] - c2 * q[jm1])) + 0.5 * yycon3 * (buf[0][jp1] - 2.0 * buf[0][j] + buf[0][jm1]) + yycon4 * (cuf[jp1] - 2.0 * cuf[j] + cuf[jm1]) + yycon5 * (buf[4][jp1] - 2.0 * buf[4][j] + buf[4][jm1]) + dy5ty1 * (ue[4][jp1] - 2.0 * ue[4][j] + ue[4][jm1]); } for (m = 0; m < 5; m++) { j = 1; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (5.0 * ue[m][j] - 4.0 * ue[m][j + 1] + ue[m][j + 2]); j = 2; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (-4.0 * ue[m][j - 1] + 6.0 * ue[m][j] - 4.0 * ue[m][j + 1] + ue[m][j + 2]); } for (m = 0; m < 5; m++) { for (j = 3; j <= grid_points[1] - 4; j++) { forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][j - 2] - 4.0 * ue[m][j - 1] + 6.0 * ue[m][j] - 4.0 * ue[m][j + 1] + ue[m][j + 2]); 
} } for (m = 0; m < 5; m++) { j = grid_points[1] - 3; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][j - 2] - 4.0 * ue[m][j - 1] + 6.0 * ue[m][j] - 4.0 * ue[m][j + 1]); j = grid_points[1] - 2; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][j - 2] - 4.0 * ue[m][j - 1] + 5.0 * ue[m][j]); } } } for (j = 1; j <= grid_points[1] - 2; j++) { eta = (double) j * dnym1; for (i = 1; i <= grid_points[0] - 2; i++) { xi = (double) i * dnxm1; for (k = 0; k <= grid_points[2] - 1; k++) { zeta = (double) k * dnzm1; exact_solution(xi, eta, zeta, dtemp); for (m = 0; m < 5; m++) { ue[m][k] = dtemp[m]; } dtpp = 1.0 / dtemp[0]; for (m = 1; m < 5; m++) { buf[m][k] = dtpp * dtemp[m]; } cuf[k] = buf[3][k] * buf[3][k]; buf[0][k] = cuf[k] + buf[1][k] * buf[1][k] + buf[2][k] * buf[2][k]; q[k] = 0.5 * (buf[1][k] * ue[1][k] + buf[2][k] * ue[2][k] + buf[3][k] * ue[3][k]); } for (k = 1; k <= grid_points[2] - 2; k++) { km1 = k - 1; kp1 = k + 1; forcing[0][i][j][k] = forcing[0][i][j][k] - tz2 * (ue[3][kp1] - ue[3][km1]) + dz1tz1 * (ue[0][kp1] - 2.0 * ue[0][k] + ue[0][km1]); forcing[1][i][j][k] = forcing[1][i][j][k] - tz2 * (ue[1][kp1] * buf[3][kp1] - ue[1][km1] * buf[3][km1]) + zzcon2 * (buf[1][kp1] - 2.0 * buf[1][k] + buf[1][km1]) + dz2tz1 * (ue[1][kp1] - 2.0 * ue[1][k] + ue[1][km1]); forcing[2][i][j][k] = forcing[2][i][j][k] - tz2 * (ue[2][kp1] * buf[3][kp1] - ue[2][km1] * buf[3][km1]) + zzcon2 * (buf[2][kp1] - 2.0 * buf[2][k] + buf[2][km1]) + dz3tz1 * (ue[2][kp1] - 2.0 * ue[2][k] + ue[2][km1]); forcing[3][i][j][k] = forcing[3][i][j][k] - tz2 * ((ue[3][kp1] * buf[3][kp1] + c2 * (ue[4][kp1] - q[kp1])) - (ue[3][km1] * buf[3][km1] + c2 * (ue[4][km1] - q[km1]))) + zzcon1 * (buf[3][kp1] - 2.0 * buf[3][k] + buf[3][km1]) + dz4tz1 * (ue[3][kp1] - 2.0 * ue[3][k] + ue[3][km1]); forcing[4][i][j][k] = forcing[4][i][j][k] - tz2 * (buf[3][kp1] * (c1 * ue[4][kp1] - c2 * q[kp1]) - buf[3][km1] * (c1 * ue[4][km1] - c2 * q[km1])) + 0.5 * zzcon3 * (buf[0][kp1] - 2.0 * buf[0][k] + buf[0][km1]) + 
/* NOTE(review): machine-flattened (instrumentation-generated) benchmark code;
 * only comments are added here, executable tokens are untouched.
 * This line begins inside exact_rhs(), whose definition starts above this
 * chunk: it finishes the z-sweep forcing expression, then applies the
 * 4th-order z-dissipation at k = 1,2, the interior k stencil, and the two
 * high-k boundary stencils. */
zzcon4 * (cuf[kp1] - 2.0 * cuf[k] + cuf[km1]) + zzcon5 * (buf[4][kp1] - 2.0 * buf[4][k] + buf[4][km1]) + dz5tz1 * (ue[4][kp1] - 2.0 * ue[4][k] + ue[4][km1]); }
for (m = 0; m < 5; m++) { k = 1; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (5.0 * ue[m][k] - 4.0 * ue[m][k + 1] + ue[m][k + 2]); k = 2; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (-4.0 * ue[m][k - 1] + 6.0 * ue[m][k] - 4.0 * ue[m][k + 1] + ue[m][k + 2]); }
for (m = 0; m < 5; m++) { for (k = 3; k <= grid_points[2] - 4; k++) { forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][k - 2] - 4.0 * ue[m][k - 1] + 6.0 * ue[m][k] - 4.0 * ue[m][k + 1] + ue[m][k + 2]); } }
for (m = 0; m < 5; m++) { k = grid_points[2] - 3; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][k - 2] - 4.0 * ue[m][k - 1] + 6.0 * ue[m][k] - 4.0 * ue[m][k + 1]); k = grid_points[2] - 2; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][k - 2] - 4.0 * ue[m][k - 1] + 5.0 * ue[m][k]); } } }
/* Finally flip the sign of the assembled forcing term over the interior. */
for (m = 0; m < 5; m++) { for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { forcing[m][i][j][k] = -1.0 * forcing[m][i][j][k]; } } } } }
/* exact_solution: evaluate the analytic solution at one point.
 * Each of the five components dtemp[0..4] is a quartic polynomial in each
 * of xi/eta/zeta with coefficients taken from the table ce[13][5]
 * (declared elsewhere in this file). */
static void exact_solution(double xi, double eta , double zeta , double dtemp[5]) { int m; for (m = 0; m < 5; m++) { dtemp[m] = ce[0][m] + xi * (ce[1][m] + xi * (ce[4][m] + xi * (ce[7][m] + xi * ce[10][m]))) + eta * (ce[2][m] + eta * (ce[5][m] + eta * (ce[8][m] + eta * ce[11][m]))) + zeta * (ce[3][m] + zeta * (ce[6][m] + zeta * (ce[9][m] + zeta * ce[12][m]))); } }
/* initialize: set the solution array u.
 * Step 1: fill the full 12x12x12 storage with a uniform state
 * (rho = 1, momenta = 0, energy = 1).
 * Step 2 (continues on the next source lines): overwrite every grid point
 * with a blend of the exact solution evaluated on the six faces, then pin
 * each boundary face to the exact solution itself. */
static void initialize(void ) { int i; int j; int k; int m; int ix; int iy; int iz; double xi; double eta; double zeta; double Pface[2][3][5]; double Pxi; double Peta; double Pzeta; double temp[5]; for (i = 0; i <= 12 - 1; i++) { for (j = 0; j <= 12 - 1; j++) { for (k = 0; k <= 12 - 1; k++) { u[0][i][j][k] = 1.0; u[1][i][j][k] = 0.0; u[2][i][j][k] = 0.0; u[3][i][j][k] = 0.0; u[4][i][j][k] = 1.0; } } } for (i = 0; i <= grid_points[0] - 1; i++) { xi = 
/* initialize (cont.): xi/eta/zeta are the normalized coordinates of point
 * (i,j,k); dnxm1/dnym1/dnzm1 are the grid spacings (defined elsewhere). */
(double) i * dnxm1; for (j = 0; j <= grid_points[1] - 1; j++) { eta = (double) j * dnym1; for (k = 0; k <= grid_points[2] - 1; k++) { zeta = (double) k * dnzm1;
/* Evaluate the exact solution on the two opposing faces in each of the
 * three coordinate directions: Pface[face][direction][component].
 * The _imopVarPre* temporaries are artifacts of the instrumentation pass. */
for (ix = 0; ix < 2; ix++) { double *_imopVarPre199; double _imopVarPre200; _imopVarPre199 = &Pface[ix][0][0]; _imopVarPre200 = (double) ix; exact_solution(_imopVarPre200, eta, zeta, _imopVarPre199); } for (iy = 0; iy < 2; iy++) { double *_imopVarPre203; double _imopVarPre204; _imopVarPre203 = &Pface[iy][1][0]; _imopVarPre204 = (double) iy; exact_solution(xi, _imopVarPre204, zeta, _imopVarPre203); } for (iz = 0; iz < 2; iz++) { double *_imopVarPre207; double _imopVarPre208; _imopVarPre207 = &Pface[iz][2][0]; _imopVarPre208 = (double) iz; exact_solution(xi, eta, _imopVarPre208, _imopVarPre207); }
/* Linearly interpolate between each face pair, then combine the three
 * interpolants with an inclusion-exclusion blend. */
for (m = 0; m < 5; m++) { Pxi = xi * Pface[1][0][m] + (1.0 - xi) * Pface[0][0][m]; Peta = eta * Pface[1][1][m] + (1.0 - eta) * Pface[0][1][m]; Pzeta = zeta * Pface[1][2][m] + (1.0 - zeta) * Pface[0][2][m]; u[m][i][j][k] = Pxi + Peta + Pzeta - Pxi * Peta - Pxi * Pzeta - Peta * Pzeta + Pxi * Peta * Pzeta; } } } }
/* Boundary faces i = 0 and i = grid_points[0]-1: exact solution. */
xi = 0.0; i = 0; for (j = 0; j < grid_points[1]; j++) { eta = (double) j * dnym1; for (k = 0; k < grid_points[2]; k++) { zeta = (double) k * dnzm1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[m][i][j][k] = temp[m]; } } } xi = 1.0; i = grid_points[0] - 1; for (j = 0; j < grid_points[1]; j++) { eta = (double) j * dnym1; for (k = 0; k < grid_points[2]; k++) { zeta = (double) k * dnzm1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[m][i][j][k] = temp[m]; } } }
/* Boundary faces j = 0 and j = grid_points[1]-1. */
eta = 0.0; j = 0; for (i = 0; i < grid_points[0]; i++) { xi = (double) i * dnxm1; for (k = 0; k < grid_points[2]; k++) { zeta = (double) k * dnzm1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[m][i][j][k] = temp[m]; } } } eta = 1.0; j = grid_points[1] - 1; for (i = 0; i < grid_points[0]; i++) { xi = (double) i * dnxm1; for (k = 0; k < grid_points[2]; k++) { zeta = (double) k * dnzm1; 
exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[m][i][j][k] = temp[m]; } } }
/* initialize (cont.): boundary faces k = 0 and k = grid_points[2]-1. */
zeta = 0.0; k = 0; for (i = 0; i < grid_points[0]; i++) { xi = (double) i * dnxm1; for (j = 0; j < grid_points[1]; j++) { eta = (double) j * dnym1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[m][i][j][k] = temp[m]; } } } zeta = 1.0; k = grid_points[2] - 1; for (i = 0; i < grid_points[0]; i++) { xi = (double) i * dnxm1; for (j = 0; j < grid_points[1]; j++) { eta = (double) j * dnym1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[m][i][j][k] = temp[m]; } } } }
/* lhsinit: clear all 15 planes of the factored left-hand-side array lhs,
 * then set the main diagonal (plane 5*n+2) of each of the three
 * pentadiagonal-factor groups to 1.0.
 * NOTE(review): the "omp for" worksharing here is orphaned — this function
 * appears to be intended to run inside an enclosing parallel region
 * (confirm against the callers). */
static void lhsinit(void ) { int i; int j; int k; int n; for (n = 0; n < 15; n++) {
#pragma omp for nowait
for (i = 0; i < grid_points[0]; i++) { for (j = 0; j < grid_points[1]; j++) { for (k = 0; k < grid_points[2]; k++) { lhs[n][i][j][k] = 0.0; } } } } for (n = 0; n < 3; n++) {
#pragma omp for nowait
for (i = 0; i < grid_points[0]; i++) { for (j = 0; j < grid_points[1]; j++) { for (k = 0; k < grid_points[2]; k++) { lhs[5 * n + 2][i][j][k] = 1.0; } } } } }
/* lhsx: build the x-direction implicit systems.  For each (j,k) pencil it
 * first fills cv[i] (x-velocity) and rhon[i]; the nested _imopVarPre*
 * if/else chains below are the instrumentation tool's expansion of a
 * nested max() of (dx2 + con43*ru1, dx5 + c1c5*ru1, dxmax + ru1, dx1).
 * Also contains orphaned "omp for" worksharing — see note on lhsinit. */
static void lhsx(void ) { double ru1; int i; int j; int k; for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) {
#pragma omp for nowait
for (i = 0; i <= grid_points[0] - 1; i++) { ru1 = c3c4 * rho_i[i][j][k]; cv[i] = us[i][j][k]; int _imopVarPre719; double _imopVarPre720; int _imopVarPre721; double _imopVarPre722; int _imopVarPre729; double _imopVarPre730; int _imopVarPre731; double _imopVarPre732; int _imopVarPre825; double _imopVarPre826; int _imopVarPre827; double _imopVarPre828; int _imopVarPre835; double _imopVarPre836; _imopVarPre719 = ((dxmax + ru1) > dx1); if (_imopVarPre719) { _imopVarPre720 = (dxmax + ru1); } else { _imopVarPre720 = dx1; } _imopVarPre721 = ((dx5 + c1c5 * ru1) > _imopVarPre720); if (_imopVarPre721) { _imopVarPre722 = (dx5 + c1c5 * ru1); } else { _imopVarPre729 = ((dxmax + ru1) > dx1); if (_imopVarPre729) { _imopVarPre730 = (dxmax + ru1); } else { _imopVarPre730 = dx1; 
} _imopVarPre722 = _imopVarPre730; } _imopVarPre731 = ((dx2 + con43 * ru1) > _imopVarPre722); if (_imopVarPre731) { _imopVarPre732 = (dx2 + con43 * ru1); } else { _imopVarPre825 = ((dxmax + ru1) > dx1); if (_imopVarPre825) { _imopVarPre826 = (dxmax + ru1); } else { _imopVarPre826 = dx1; } _imopVarPre827 = ((dx5 + c1c5 * ru1) > _imopVarPre826); if (_imopVarPre827) { _imopVarPre828 = (dx5 + c1c5 * ru1); } else { _imopVarPre835 = ((dxmax + ru1) > dx1); if (_imopVarPre835) { _imopVarPre836 = (dxmax + ru1); } else { _imopVarPre836 = dx1; } _imopVarPre828 = _imopVarPre836; } _imopVarPre732 = _imopVarPre828; }
/* rhon[i] = max(dx2 + con43*ru1, dx5 + c1c5*ru1, dxmax + ru1, dx1). */
rhon[i] = _imopVarPre732; }
/* Barrier: cv[] and rhon[] must be complete before the loop below reads
 * neighbouring entries (i-1, i+1).  The "dummyFlush" comment is
 * tool-generated dataflow bookkeeping, preserved as-is. */
// #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f, rhon.f, cv.f]) read([cv, i, dttx2, rhon, lhs.f, dttx1, rhon.f, grid_points.f, c2dttx1, grid_points, lhs, cv.f])
#pragma omp barrier
/* First factor: lower/main/upper diagonals of the x tridiagonal. */
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) { lhs[0][i][j][k] = 0.0; lhs[1][i][j][k] = -dttx2 * cv[i - 1] - dttx1 * rhon[i - 1]; lhs[2][i][j][k] = 1.0 + c2dttx1 * rhon[i]; lhs[3][i][j][k] = dttx2 * cv[i + 1] - dttx1 * rhon[i + 1]; lhs[4][i][j][k] = 0.0; }
// #pragma omp dummyFlush BARRIER_START written([lhs.f]) read([_imopVarPre731, j, _imopVarPre721, comz1, rho_i, rhon, _imopVarPre719, lhs.f, comz5, _imopVarPre729, grid_points.f, dxmax, us, dx1, c1c5, dx5, cv, i, _imopVarPre825, comz4, _imopVarPre835, comz6, _imopVarPre827, con43, grid_points, lhs, us.f, c3c4, rho_i.f, dx2])
#pragma omp barrier
} }
/* Add the 4th-order dissipation stencil near the low-i boundary (i = 1,2;
 * comz* are the precomputed stencil constants). */
i = 1;
#pragma omp for nowait
for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { lhs[2][i][j][k] = lhs[2][i][j][k] + comz5; lhs[3][i][j][k] = lhs[3][i][j][k] - comz4; lhs[4][i][j][k] = lhs[4][i][j][k] + comz1; lhs[1][i + 1][j][k] = lhs[1][i + 1][j][k] - comz4; lhs[2][i + 1][j][k] = lhs[2][i + 1][j][k] + comz6; lhs[3][i + 1][j][k] = lhs[3][i + 1][j][k] - comz4; lhs[4][i + 1][j][k] = lhs[4][i + 1][j][k] + comz1; } }
/* Interior dissipation stencil in i. */
#pragma omp for nowait
for (i = 3; i <= grid_points[0] - 4; i++) { for (j = 1; j 
<= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { lhs[0][i][j][k] = lhs[0][i][j][k] + comz1; lhs[1][i][j][k] = lhs[1][i][j][k] - comz4; lhs[2][i][j][k] = lhs[2][i][j][k] + comz6; lhs[3][i][j][k] = lhs[3][i][j][k] - comz4; lhs[4][i][j][k] = lhs[4][i][j][k] + comz1; } } }
/* lhsx (cont.): high-i boundary dissipation (i = grid_points[0]-3, -2). */
i = grid_points[0] - 3;
#pragma omp for nowait
for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { lhs[0][i][j][k] = lhs[0][i][j][k] + comz1; lhs[1][i][j][k] = lhs[1][i][j][k] - comz4; lhs[2][i][j][k] = lhs[2][i][j][k] + comz6; lhs[3][i][j][k] = lhs[3][i][j][k] - comz4; lhs[0][i + 1][j][k] = lhs[0][i + 1][j][k] + comz1; lhs[1][i + 1][j][k] = lhs[1][i + 1][j][k] - comz4; lhs[2][i + 1][j][k] = lhs[2][i + 1][j][k] + comz5; } }
// #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f, rhon.f, cv.f]) read([i, dttx2, lhs.f, speed, grid_points.f, grid_points, speed.f, lhs])
#pragma omp barrier
/* Second and third factor groups (planes 5..9 and 10..14): copy the first
 * factor, shifting the off-diagonals by -/+ dttx2 * speed[]. */
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { lhs[0 + 5][i][j][k] = lhs[0][i][j][k]; lhs[1 + 5][i][j][k] = lhs[1][i][j][k] - dttx2 * speed[i - 1][j][k]; lhs[2 + 5][i][j][k] = lhs[2][i][j][k]; lhs[3 + 5][i][j][k] = lhs[3][i][j][k] + dttx2 * speed[i + 1][j][k]; lhs[4 + 5][i][j][k] = lhs[4][i][j][k]; lhs[0 + 10][i][j][k] = lhs[0][i][j][k]; lhs[1 + 10][i][j][k] = lhs[1][i][j][k] + dttx2 * speed[i - 1][j][k]; lhs[2 + 10][i][j][k] = lhs[2][i][j][k]; lhs[3 + 10][i][j][k] = lhs[3][i][j][k] - dttx2 * speed[i + 1][j][k]; lhs[4 + 10][i][j][k] = lhs[4][i][j][k]; } } }
// #pragma omp dummyFlush BARRIER_START written([lhs.f]) read([rhs.f, rhs, lhs.f, j, grid_points.f, grid_points, lhs])
#pragma omp barrier
}
/* lhsy: y-direction analogue of lhsx.  cv[j] holds the y-velocity vs and
 * rhoq[j] the inlined max(dy3 + con43*ru1, dy5 + c1c5*ru1, dymax + ru1,
 * dy1); contains orphaned "omp for" worksharing like lhsx. */
static void lhsy(void ) { double ru1; int i; int j; int k; for (i = 1; i <= grid_points[0] - 2; i++) { for (k = 1; k <= grid_points[2] - 2; k++) {
#pragma omp for nowait
for (j = 0; j <= grid_points[1] - 1; j++) { ru1 = c3c4 * rho_i[i][j][k]; cv[j] = 
vs[i][j][k]; int _imopVarPre1347; double _imopVarPre1348; int _imopVarPre1349; double _imopVarPre1350; int _imopVarPre1357; double _imopVarPre1358; int _imopVarPre1359; double _imopVarPre1360; int _imopVarPre1453; double _imopVarPre1454; int _imopVarPre1455; double _imopVarPre1456; int _imopVarPre1463; double _imopVarPre1464;
/* Inlined nested max(); the result lands in _imopVarPre1360. */
_imopVarPre1347 = ((dymax + ru1) > dy1); if (_imopVarPre1347) { _imopVarPre1348 = (dymax + ru1); } else { _imopVarPre1348 = dy1; } _imopVarPre1349 = ((dy5 + c1c5 * ru1) > _imopVarPre1348); if (_imopVarPre1349) { _imopVarPre1350 = (dy5 + c1c5 * ru1); } else { _imopVarPre1357 = ((dymax + ru1) > dy1); if (_imopVarPre1357) { _imopVarPre1358 = (dymax + ru1); } else { _imopVarPre1358 = dy1; } _imopVarPre1350 = _imopVarPre1358; } _imopVarPre1359 = ((dy3 + con43 * ru1) > _imopVarPre1350); if (_imopVarPre1359) { _imopVarPre1360 = (dy3 + con43 * ru1); } else { _imopVarPre1453 = ((dymax + ru1) > dy1); if (_imopVarPre1453) { _imopVarPre1454 = (dymax + ru1); } else { _imopVarPre1454 = dy1; } _imopVarPre1455 = ((dy5 + c1c5 * ru1) > _imopVarPre1454); if (_imopVarPre1455) { _imopVarPre1456 = (dy5 + c1c5 * ru1); } else { _imopVarPre1463 = ((dymax + ru1) > dy1); if (_imopVarPre1463) { _imopVarPre1464 = (dymax + ru1); } else { _imopVarPre1464 = dy1; } _imopVarPre1456 = _imopVarPre1464; } _imopVarPre1360 = _imopVarPre1456; }
/* rhoq[j] = max(dy3 + con43*ru1, dy5 + c1c5*ru1, dymax + ru1, dy1). */
rhoq[j] = _imopVarPre1360; }
/* Barrier: cv[]/rhoq[] complete before the j-1/j+1 reads below. */
// #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f, rhoq.f, cv.f]) read([cv, j, lhs.f, dtty1, dtty2, rhoq.f, grid_points.f, rhoq, grid_points, lhs, cv.f, c2dtty1])
#pragma omp barrier
/* First factor: y-direction tridiagonal. */
#pragma omp for nowait
for (j = 1; j <= grid_points[1] - 2; j++) { lhs[0][i][j][k] = 0.0; lhs[1][i][j][k] = -dtty2 * cv[j - 1] - dtty1 * rhoq[j - 1]; lhs[2][i][j][k] = 1.0 + c2dtty1 * rhoq[j]; lhs[3][i][j][k] = dtty2 * cv[j + 1] - dtty1 * rhoq[j + 1]; lhs[4][i][j][k] = 0.0; }
// #pragma omp dummyFlush BARRIER_START written([lhs.f]) read([comz1, rho_i, lhs.f, comz5, grid_points.f, vs, j, c1c5, dy1, dy3, _imopVarPre1349, (tool-generated read-list continues on the next source line)
// ...tail of the tool-generated dataflow annotation: dy5, _imopVarPre1359, _imopVarPre1347, _imopVarPre1357, cv, dymax, comz4, comz6, con43, grid_points, rhoq, lhs, vs.f, _imopVarPre1455, i, c3c4, _imopVarPre1453, rho_i.f, _imopVarPre1463])
#pragma omp barrier
} }
/* lhsy (cont.): low-j boundary dissipation (j = 1,2). */
j = 1;
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) { for (k = 1; k <= grid_points[2] - 2; k++) { lhs[2][i][j][k] = lhs[2][i][j][k] + comz5; lhs[3][i][j][k] = lhs[3][i][j][k] - comz4; lhs[4][i][j][k] = lhs[4][i][j][k] + comz1; lhs[1][i][j + 1][k] = lhs[1][i][j + 1][k] - comz4; lhs[2][i][j + 1][k] = lhs[2][i][j + 1][k] + comz6; lhs[3][i][j + 1][k] = lhs[3][i][j + 1][k] - comz4; lhs[4][i][j + 1][k] = lhs[4][i][j + 1][k] + comz1; } }
/* Interior dissipation stencil in j. */
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 3; j <= grid_points[1] - 4; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { lhs[0][i][j][k] = lhs[0][i][j][k] + comz1; lhs[1][i][j][k] = lhs[1][i][j][k] - comz4; lhs[2][i][j][k] = lhs[2][i][j][k] + comz6; lhs[3][i][j][k] = lhs[3][i][j][k] - comz4; lhs[4][i][j][k] = lhs[4][i][j][k] + comz1; } } }
/* High-j boundary dissipation. */
j = grid_points[1] - 3;
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) { for (k = 1; k <= grid_points[2] - 2; k++) { lhs[0][i][j][k] = lhs[0][i][j][k] + comz1; lhs[1][i][j][k] = lhs[1][i][j][k] - comz4; lhs[2][i][j][k] = lhs[2][i][j][k] + comz6; lhs[3][i][j][k] = lhs[3][i][j][k] - comz4; lhs[0][i][j + 1][k] = lhs[0][i][j + 1][k] + comz1; lhs[1][i][j + 1][k] = lhs[1][i][j + 1][k] - comz4; lhs[2][i][j + 1][k] = lhs[2][i][j + 1][k] + comz5; } }
// #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f, rhoq.f, cv.f]) read([lhs.f, i, speed, dtty2, grid_points.f, grid_points, speed.f, lhs])
#pragma omp barrier
/* Second and third factor groups: offsets -/+ dtty2 * speed[]. */
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { lhs[0 + 5][i][j][k] = lhs[0][i][j][k]; lhs[1 + 5][i][j][k] = lhs[1][i][j][k] - dtty2 * speed[i][j - 1][k]; lhs[2 + 5][i][j][k] = lhs[2][i][j][k]; lhs[3 + 5][i][j][k] 
= lhs[3][i][j][k] + dtty2 * speed[i][j + 1][k]; lhs[4 + 5][i][j][k] = lhs[4][i][j][k]; lhs[0 + 10][i][j][k] = lhs[0][i][j][k]; lhs[1 + 10][i][j][k] = lhs[1][i][j][k] + dtty2 * speed[i][j - 1][k]; lhs[2 + 10][i][j][k] = lhs[2][i][j][k]; lhs[3 + 10][i][j][k] = lhs[3][i][j][k] - dtty2 * speed[i][j + 1][k]; lhs[4 + 10][i][j][k] = lhs[4][i][j][k]; } } }
// #pragma omp dummyFlush BARRIER_START written([lhs.f]) read([rhs.f, i, rhs, lhs.f, grid_points.f, grid_points, lhs])
#pragma omp barrier
}
/* lhsz: z-direction analogue of lhsx/lhsy.  cv[k] holds the z-velocity ws
 * and rhos[k] the inlined max(dz4 + con43*ru1, dz5 + c1c5*ru1,
 * dzmax + ru1, dz1); contains orphaned "omp for" worksharing. */
static void lhsz(void ) { double ru1; int i; int j; int k; for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) {
#pragma omp for nowait
for (k = 0; k <= grid_points[2] - 1; k++) { ru1 = c3c4 * rho_i[i][j][k]; cv[k] = ws[i][j][k]; int _imopVarPre1975; double _imopVarPre1976; int _imopVarPre1977; double _imopVarPre1978; int _imopVarPre1985; double _imopVarPre1986; int _imopVarPre1987; double _imopVarPre1988; int _imopVarPre2081; double _imopVarPre2082; int _imopVarPre2083; double _imopVarPre2084; int _imopVarPre2091; double _imopVarPre2092;
/* Inlined nested max(); the result lands in _imopVarPre1988. */
_imopVarPre1975 = ((dzmax + ru1) > dz1); if (_imopVarPre1975) { _imopVarPre1976 = (dzmax + ru1); } else { _imopVarPre1976 = dz1; } _imopVarPre1977 = ((dz5 + c1c5 * ru1) > _imopVarPre1976); if (_imopVarPre1977) { _imopVarPre1978 = (dz5 + c1c5 * ru1); } else { _imopVarPre1985 = ((dzmax + ru1) > dz1); if (_imopVarPre1985) { _imopVarPre1986 = (dzmax + ru1); } else { _imopVarPre1986 = dz1; } _imopVarPre1978 = _imopVarPre1986; } _imopVarPre1987 = ((dz4 + con43 * ru1) > _imopVarPre1978); if (_imopVarPre1987) { _imopVarPre1988 = (dz4 + con43 * ru1); } else { _imopVarPre2081 = ((dzmax + ru1) > dz1); if (_imopVarPre2081) { _imopVarPre2082 = (dzmax + ru1); } else { _imopVarPre2082 = dz1; } _imopVarPre2083 = ((dz5 + c1c5 * ru1) > _imopVarPre2082); if (_imopVarPre2083) { _imopVarPre2084 = (dz5 + c1c5 * ru1); } else { _imopVarPre2091 = ((dzmax + ru1) > dz1); if (_imopVarPre2091) { _imopVarPre2092 = (dzmax + ru1); } else { 
_imopVarPre2092 = dz1; } _imopVarPre2084 = _imopVarPre2092; } _imopVarPre1988 = _imopVarPre2084; }
/* rhos[k] = max(dz4 + con43*ru1, dz5 + c1c5*ru1, dzmax + ru1, dz1). */
rhos[k] = _imopVarPre1988; }
/* Barrier: cv[]/rhos[] complete before the k-1/k+1 reads below. */
// #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f, rhos.f, cv.f]) read([cv, k, dttz1, lhs.f, dttz2, grid_points.f, grid_points, lhs, rhos.f, c2dttz1, cv.f, rhos])
#pragma omp barrier
/* First factor: z-direction tridiagonal. */
#pragma omp for nowait
for (k = 1; k <= grid_points[2] - 2; k++) { lhs[0][i][j][k] = 0.0; lhs[1][i][j][k] = -dttz2 * cv[k - 1] - dttz1 * rhos[k - 1]; lhs[2][i][j][k] = 1.0 + c2dttz1 * rhos[k]; lhs[3][i][j][k] = dttz2 * cv[k + 1] - dttz1 * rhos[k + 1]; lhs[4][i][j][k] = 0.0; }
// #pragma omp dummyFlush BARRIER_START written([lhs.f]) read([k, rho_i, comz1, i, lhs.f, comz5, grid_points.f, ws, dz1, c1c5, dz5, cv, comz4, comz6, dzmax, con43, grid_points, lhs, rhos, _imopVarPre1977, _imopVarPre1987, _imopVarPre1975, _imopVarPre1985, ws.f, c3c4, _imopVarPre2081, rho_i.f, _imopVarPre2083, dz4, _imopVarPre2091])
#pragma omp barrier
} }
/* Low-k boundary dissipation (k = 1,2). */
k = 1;
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { lhs[2][i][j][k] = lhs[2][i][j][k] + comz5; lhs[3][i][j][k] = lhs[3][i][j][k] - comz4; lhs[4][i][j][k] = lhs[4][i][j][k] + comz1; lhs[1][i][j][k + 1] = lhs[1][i][j][k + 1] - comz4; lhs[2][i][j][k + 1] = lhs[2][i][j][k + 1] + comz6; lhs[3][i][j][k + 1] = lhs[3][i][j][k + 1] - comz4; lhs[4][i][j][k + 1] = lhs[4][i][j][k + 1] + comz1; } }
/* Interior dissipation stencil in k. */
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 3; k <= grid_points[2] - 4; k++) { lhs[0][i][j][k] = lhs[0][i][j][k] + comz1; lhs[1][i][j][k] = lhs[1][i][j][k] - comz4; lhs[2][i][j][k] = lhs[2][i][j][k] + comz6; lhs[3][i][j][k] = lhs[3][i][j][k] - comz4; lhs[4][i][j][k] = lhs[4][i][j][k] + comz1; } } }
/* High-k boundary dissipation. */
k = grid_points[2] - 3;
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { lhs[0][i][j][k] = lhs[0][i][j][k] + comz1; lhs[1][i][j][k] = 
lhs[1][i][j][k] - comz4; lhs[2][i][j][k] = lhs[2][i][j][k] + comz6; lhs[3][i][j][k] = lhs[3][i][j][k] - comz4; lhs[0][i][j][k + 1] = lhs[0][i][j][k + 1] + comz1; lhs[1][i][j][k + 1] = lhs[1][i][j][k + 1] - comz4; lhs[2][i][j][k + 1] = lhs[2][i][j][k + 1] + comz5; } }
// #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f, rhos.f, cv.f]) read([i, lhs.f, dttz2, speed, grid_points.f, grid_points, speed.f, lhs])
#pragma omp barrier
/* lhsz (cont.): second and third factor groups, offsets -/+ dttz2*speed. */
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { lhs[0 + 5][i][j][k] = lhs[0][i][j][k]; lhs[1 + 5][i][j][k] = lhs[1][i][j][k] - dttz2 * speed[i][j][k - 1]; lhs[2 + 5][i][j][k] = lhs[2][i][j][k]; lhs[3 + 5][i][j][k] = lhs[3][i][j][k] + dttz2 * speed[i][j][k + 1]; lhs[4 + 5][i][j][k] = lhs[4][i][j][k]; lhs[0 + 10][i][j][k] = lhs[0][i][j][k]; lhs[1 + 10][i][j][k] = lhs[1][i][j][k] + dttz2 * speed[i][j][k - 1]; lhs[2 + 10][i][j][k] = lhs[2][i][j][k]; lhs[3 + 10][i][j][k] = lhs[3][i][j][k] - dttz2 * speed[i][j][k + 1]; lhs[4 + 10][i][j][k] = lhs[4][i][j][k]; } } }
// #pragma omp dummyFlush BARRIER_START written([lhs.f]) read([rhs.f, rhs, i, lhs.f, grid_points.f, grid_points, lhs])
#pragma omp barrier
}
/* ninvr: pointwise 5-component linear transform of rhs over the interior
 * grid — mixes (r1..r5) using the constant bt (presumably sqrt(1/2) as in
 * the reference benchmark — confirm against its definition above).
 * Embarrassingly parallel: each (i,j,k) is independent. */
static void ninvr(void ) { int i; int j; int k; double r1; double r2; double r3; double r4; double r5; double t1; double t2;
#pragma omp parallel default(shared) private(i, j, k, r1, r2, r3, r4, r5, t1, t2)
{
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { r1 = rhs[0][i][j][k]; r2 = rhs[1][i][j][k]; r3 = rhs[2][i][j][k]; r4 = rhs[3][i][j][k]; r5 = rhs[4][i][j][k]; t1 = bt * r3; t2 = 0.5 * (r4 + r5); rhs[0][i][j][k] = -r2; rhs[1][i][j][k] = r1; rhs[2][i][j][k] = bt * (r4 - r5); rhs[3][i][j][k] = -t1 + t2; rhs[4][i][j][k] = t1 + t2; } } } } }
/* pinvr: companion pointwise transform of rhs (same structure as ninvr but
 * pivoting on r1 instead of r3); declarations continue on the next source
 * line. */
static void pinvr(void ) { int i; int j; int k; double r1; double r2; double r3; double r4; 
double r5; double t1; double t2;
#pragma omp parallel default(shared) private(i, j, k, r1, r2, r3, r4, r5, t1, t2)
{
/* pinvr body: independent per-point update of the five rhs components. */
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { r1 = rhs[0][i][j][k]; r2 = rhs[1][i][j][k]; r3 = rhs[2][i][j][k]; r4 = rhs[3][i][j][k]; r5 = rhs[4][i][j][k]; t1 = bt * r1; t2 = 0.5 * (r4 + r5); rhs[0][i][j][k] = bt * (r4 - r5); rhs[1][i][j][k] = -r3; rhs[2][i][j][k] = r2; rhs[3][i][j][k] = -t1 + t2; rhs[4][i][j][k] = t1 + t2; } } } } }
/* compute_rhs: assemble the right-hand side of the implicit system.
 * Phase 1: per-point auxiliaries (reciprocal density rho_i, velocities
 * us/vs/ws, kinetic term square/qs, and aux = sqrt(c1c2*rho_inv*(E -
 * square)) stored in speed[] with its reciprocal in ainv[] — presumably
 * the acoustic speed; confirm constants).  Phase 2: rhs := forcing.
 * Phase 3 onward: directional flux differences (the function continues
 * past the end of this chunk). */
static void compute_rhs(void ) {
#pragma omp parallel
{ int i; int j; int k; int m; double aux; double rho_inv; double uijk; double up1; double um1; double vijk; double vp1; double vm1; double wijk; double wp1; double wm1;
/* Phase 1: point-wise auxiliary arrays. */
#pragma omp for nowait
for (i = 0; i <= grid_points[0] - 1; i++) { for (j = 0; j <= grid_points[1] - 1; j++) { for (k = 0; k <= grid_points[2] - 1; k++) { rho_inv = 1.0 / u[0][i][j][k]; rho_i[i][j][k] = rho_inv; us[i][j][k] = u[1][i][j][k] * rho_inv; vs[i][j][k] = u[2][i][j][k] * rho_inv; ws[i][j][k] = u[3][i][j][k] * rho_inv; square[i][j][k] = 0.5 * (u[1][i][j][k] * u[1][i][j][k] + u[2][i][j][k] * u[2][i][j][k] + u[3][i][j][k] * u[3][i][j][k]) * rho_inv; qs[i][j][k] = square[i][j][k] * rho_inv; aux = c1c2 * rho_inv * (u[4][i][j][k] - square[i][j][k]); aux = sqrt(aux); speed[i][j][k] = aux; ainv[i][j][k] = 1.0 / aux; } } }
/* Phase 2: rhs starts from the stored forcing term. */
for (m = 0; m < 5; m++) {
#pragma omp for nowait
for (i = 0; i <= grid_points[0] - 1; i++) { for (j = 0; j <= grid_points[1] - 1; j++) { for (k = 0; k <= grid_points[2] - 1; k++) { rhs[m][i][j][k] = forcing[m][i][j][k]; } } } }
/* Phase 3: x-direction flux differences (central differences plus
 * second-difference viscous terms). */
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { uijk = us[i][j][k]; up1 = us[i + 1][j][k]; um1 = us[i - 1][j][k]; rhs[0][i][j][k] = rhs[0][i][j][k] + dx1tx1 * (u[0][i + 1][j][k] - 2.0 * u[0][i][j][k] + u[0][i - 1][j][k]) - tx2 * (u[1][i + 1][j][k] 
- u[1][i - 1][j][k]); rhs[1][i][j][k] = rhs[1][i][j][k] + dx2tx1 * (u[1][i + 1][j][k] - 2.0 * u[1][i][j][k] + u[1][i - 1][j][k]) + xxcon2 * con43 * (up1 - 2.0 * uijk + um1) - tx2 * (u[1][i + 1][j][k] * up1 - u[1][i - 1][j][k] * um1 + (u[4][i + 1][j][k] - square[i + 1][j][k] - u[4][i - 1][j][k] + square[i - 1][j][k]) * c2); rhs[2][i][j][k] = rhs[2][i][j][k] + dx3tx1 * (u[2][i + 1][j][k] - 2.0 * u[2][i][j][k] + u[2][i - 1][j][k]) + xxcon2 * (vs[i + 1][j][k] - 2.0 * vs[i][j][k] + vs[i - 1][j][k]) - tx2 * (u[2][i + 1][j][k] * up1 - u[2][i - 1][j][k] * um1); rhs[3][i][j][k] = rhs[3][i][j][k] + dx4tx1 * (u[3][i + 1][j][k] - 2.0 * u[3][i][j][k] + u[3][i - 1][j][k]) + xxcon2 * (ws[i + 1][j][k] - 2.0 * ws[i][j][k] + ws[i - 1][j][k]) - tx2 * (u[3][i + 1][j][k] * up1 - u[3][i - 1][j][k] * um1); rhs[4][i][j][k] = rhs[4][i][j][k] + dx5tx1 * (u[4][i + 1][j][k] - 2.0 * u[4][i][j][k] + u[4][i - 1][j][k]) + xxcon3 * (qs[i + 1][j][k] - 2.0 * qs[i][j][k] + qs[i - 1][j][k]) + xxcon4 * (up1 * up1 - 2.0 * uijk * uijk + um1 * um1) + xxcon5 * (u[4][i + 1][j][k] * rho_i[i + 1][j][k] - 2.0 * u[4][i][j][k] * rho_i[i][j][k] + u[4][i - 1][j][k] * rho_i[i - 1][j][k]) - tx2 * ((c1 * u[4][i + 1][j][k] - c2 * square[i + 1][j][k]) * up1 - (c1 * u[4][i - 1][j][k] - c2 * square[i - 1][j][k]) * um1); } } } i = 1; for (m = 0; m < 5; m++) { #pragma omp for nowait for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (5.0 * u[m][i][j][k] - 4.0 * u[m][i + 1][j][k] + u[m][i + 2][j][k]); } } // #pragma omp dummyFlush BARRIER_START written([c5, Pface.f, u_exact.f, comz5, dy3ty1, rhoq.f, yycon2, ty1, tz3, c1345, xxcon4, dz3, dy1, dtdssp, conz1, comz4, dttx2, zzcon1, dzmax, c2dttz1, dx2tx1, qs.f, tz2, dy1ty1, dnzm1, us.f, yycon1, xxcon5, dz2, c3c4, c1c2, speed.f, q.f, dx4tx1, dttz1, lhs.f, xce.f, square.f, grid_points.f, zzcon2, ty3, dz2tz1, tx1, xxcon2, dx1, dz5, dy3, c2iv, dt, c5dssp, comz6, dnym1, rhon.f, zzcon3, cuf.f, c3c4tx3, 
ty2, xxcon3, dz4tz1, dy2, dz4, rhs.f, ue.f, dy2ty1, c1, comz1, dtty1, temp.f, zzcon4, tx3, con16, c1c5, dy4ty1, dx3, forcing.f, dy5, dx1tx1, dymax, c2, ainv.f, dttz2, con43, c2dttx1, zzcon5, tx2, c3c4ty3, yycon5, xxcon1, ws.f, dy4, rho_i.f, dx2, dx3tx1, u.f, c3, dz1tz1, dttx1, dnxm1, c4dssp, rhos.f, dxmax, buf.f, dx5tx1, yycon4, dz1, xcr.f, dx5, tz1, bt, c4, dz3tz1, dtty2, dy5ty1, c2dtty1, ce.f, yycon3, dssp, c3c4tz3, vs.f, dz5tz1, dx4, cv.f]) read([j, rhs.f, dssp, u, rhs, u.f, i, grid_points.f, grid_points]) #pragma omp barrier } i = 2; for (m = 0; m < 5; m++) { #pragma omp for nowait for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (-4.0 * u[m][i - 1][j][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i + 1][j][k] + u[m][i + 2][j][k]); } } // #pragma omp dummyFlush BARRIER_START written([c5, Pface.f, u_exact.f, comz5, dy3ty1, rhoq.f, yycon2, ty1, tz3, c1345, xxcon4, dz3, dy1, dtdssp, conz1, comz4, dttx2, zzcon1, dzmax, c2dttz1, dx2tx1, qs.f, tz2, dy1ty1, dnzm1, us.f, yycon1, xxcon5, dz2, c3c4, c1c2, speed.f, q.f, dx4tx1, dttz1, lhs.f, xce.f, square.f, grid_points.f, zzcon2, ty3, dz2tz1, tx1, xxcon2, dx1, dz5, dy3, c2iv, dt, c5dssp, comz6, dnym1, rhon.f, zzcon3, cuf.f, c3c4tx3, ty2, xxcon3, dz4tz1, dy2, dz4, rhs.f, ue.f, dy2ty1, c1, comz1, dtty1, temp.f, zzcon4, tx3, con16, c1c5, dy4ty1, dx3, forcing.f, dy5, dx1tx1, dymax, c2, ainv.f, dttz2, con43, c2dttx1, zzcon5, tx2, c3c4ty3, yycon5, xxcon1, ws.f, dy4, rho_i.f, dx2, dx3tx1, u.f, c3, dz1tz1, dttx1, dnxm1, c4dssp, rhos.f, dxmax, buf.f, dx5tx1, yycon4, dz1, xcr.f, dx5, tz1, bt, c4, dz3tz1, dtty2, dy5ty1, c2dtty1, ce.f, yycon3, dssp, c3c4tz3, vs.f, dz5tz1, dx4, cv.f]) read([j, rhs.f, dssp, u, rhs, u.f, i, grid_points.f, grid_points]) #pragma omp barrier } for (m = 0; m < 5; m++) { #pragma omp for nowait for (i = 3 * 1; i <= grid_points[0] - 3 * 1 - 1; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { 
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i - 2][j][k] - 4.0 * u[m][i - 1][j][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i + 1][j][k] + u[m][i + 2][j][k]); } } } // #pragma omp dummyFlush BARRIER_START written([c5, Pface.f, u_exact.f, comz5, dy3ty1, rhoq.f, yycon2, ty1, tz3, c1345, xxcon4, dz3, dy1, dtdssp, conz1, comz4, dttx2, zzcon1, dzmax, c2dttz1, dx2tx1, qs.f, tz2, dy1ty1, dnzm1, us.f, yycon1, xxcon5, dz2, c3c4, c1c2, speed.f, q.f, dx4tx1, dttz1, lhs.f, xce.f, square.f, grid_points.f, zzcon2, ty3, dz2tz1, tx1, xxcon2, dx1, dz5, dy3, c2iv, dt, c5dssp, comz6, dnym1, rhon.f, zzcon3, cuf.f, c3c4tx3, ty2, xxcon3, dz4tz1, dy2, dz4, rhs.f, ue.f, dy2ty1, c1, comz1, dtty1, temp.f, zzcon4, tx3, con16, c1c5, dy4ty1, dx3, forcing.f, dy5, dx1tx1, dymax, c2, ainv.f, dttz2, con43, c2dttx1, zzcon5, tx2, c3c4ty3, yycon5, xxcon1, ws.f, dy4, rho_i.f, dx2, dx3tx1, u.f, c3, dz1tz1, dttx1, dnxm1, c4dssp, rhos.f, dxmax, buf.f, dx5tx1, yycon4, dz1, xcr.f, dx5, tz1, bt, c4, dz3tz1, dtty2, dy5ty1, c2dtty1, ce.f, yycon3, dssp, c3c4tz3, vs.f, dz5tz1, dx4, cv.f]) read([j, rhs.f, dssp, u, rhs, u.f, i, grid_points.f, grid_points]) #pragma omp barrier } i = grid_points[0] - 3; for (m = 0; m < 5; m++) { #pragma omp for nowait for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i - 2][j][k] - 4.0 * u[m][i - 1][j][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i + 1][j][k]); } } // #pragma omp dummyFlush BARRIER_START written([c5, Pface.f, u_exact.f, comz5, dy3ty1, rhoq.f, yycon2, ty1, tz3, c1345, xxcon4, dz3, dy1, dtdssp, conz1, comz4, dttx2, zzcon1, dzmax, c2dttz1, dx2tx1, qs.f, tz2, dy1ty1, dnzm1, us.f, yycon1, xxcon5, dz2, c3c4, c1c2, speed.f, q.f, dx4tx1, dttz1, lhs.f, xce.f, square.f, grid_points.f, zzcon2, ty3, dz2tz1, tx1, xxcon2, dx1, dz5, dy3, c2iv, dt, c5dssp, comz6, dnym1, rhon.f, zzcon3, cuf.f, c3c4tx3, ty2, xxcon3, dz4tz1, dy2, dz4, rhs.f, ue.f, dy2ty1, c1, comz1, dtty1, temp.f, zzcon4, tx3, con16, c1c5, 
dy4ty1, dx3, forcing.f, dy5, dx1tx1, dymax, c2, ainv.f, dttz2, con43, c2dttx1, zzcon5, tx2, c3c4ty3, yycon5, xxcon1, ws.f, dy4, rho_i.f, dx2, dx3tx1, u.f, c3, dz1tz1, dttx1, dnxm1, c4dssp, rhos.f, dxmax, buf.f, dx5tx1, yycon4, dz1, xcr.f, dx5, tz1, bt, c4, dz3tz1, dtty2, dy5ty1, c2dtty1, ce.f, yycon3, dssp, c3c4tz3, vs.f, dz5tz1, dx4, cv.f]) read([j, rhs.f, dssp, u, rhs, u.f, grid_points.f, grid_points]) #pragma omp barrier } i = grid_points[0] - 2; for (m = 0; m < 5; m++) { #pragma omp for nowait for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i - 2][j][k] - 4.0 * u[m][i - 1][j][k] + 5.0 * u[m][i][j][k]); } } // #pragma omp dummyFlush BARRIER_START written([c5, Pface.f, u_exact.f, comz5, dy3ty1, rhoq.f, yycon2, ty1, tz3, c1345, xxcon4, dz3, dy1, dtdssp, conz1, comz4, dttx2, zzcon1, dzmax, c2dttz1, dx2tx1, qs.f, tz2, dy1ty1, dnzm1, us.f, yycon1, xxcon5, dz2, c3c4, c1c2, speed.f, q.f, dx4tx1, dttz1, lhs.f, xce.f, square.f, grid_points.f, zzcon2, ty3, dz2tz1, tx1, xxcon2, dx1, dz5, dy3, c2iv, dt, c5dssp, comz6, dnym1, rhon.f, zzcon3, cuf.f, c3c4tx3, ty2, xxcon3, dz4tz1, dy2, dz4, rhs.f, ue.f, dy2ty1, c1, comz1, dtty1, temp.f, zzcon4, tx3, con16, c1c5, dy4ty1, dx3, forcing.f, dy5, dx1tx1, dymax, c2, ainv.f, dttz2, con43, c2dttx1, zzcon5, tx2, c3c4ty3, yycon5, xxcon1, ws.f, dy4, rho_i.f, dx2, dx3tx1, u.f, c3, dz1tz1, dttx1, dnxm1, c4dssp, rhos.f, dxmax, buf.f, dx5tx1, yycon4, dz1, xcr.f, dx5, tz1, bt, c4, dz3tz1, dtty2, dy5ty1, c2dtty1, ce.f, yycon3, dssp, c3c4tz3, vs.f, dz5tz1, dx4, cv.f]) read([j, rhs.f, dssp, u, rhs, u.f, grid_points.f, grid_points]) #pragma omp barrier } // #pragma omp dummyFlush BARRIER_START written([c5, Pface.f, u_exact.f, comz5, dy3ty1, rhoq.f, yycon2, ty1, tz3, c1345, xxcon4, dz3, dy1, dtdssp, conz1, comz4, dttx2, zzcon1, dzmax, c2dttz1, dx2tx1, qs.f, tz2, dy1ty1, dnzm1, us.f, yycon1, xxcon5, dz2, c3c4, c1c2, speed.f, q.f, dx4tx1, dttz1, lhs.f, xce.f, 
square.f, grid_points.f, zzcon2, ty3, dz2tz1, tx1, xxcon2, dx1, dz5, dy3, c2iv, dt, c5dssp, comz6, dnym1, rhon.f, zzcon3, cuf.f, c3c4tx3, ty2, xxcon3, dz4tz1, dy2, dz4, rhs.f, ue.f, dy2ty1, c1, comz1, dtty1, temp.f, zzcon4, tx3, con16, c1c5, dy4ty1, dx3, forcing.f, dy5, dx1tx1, dymax, c2, ainv.f, dttz2, con43, c2dttx1, zzcon5, tx2, c3c4ty3, yycon5, xxcon1, ws.f, dy4, rho_i.f, dx2, dx3tx1, u.f, c3, dz1tz1, dttx1, dnxm1, c4dssp, rhos.f, dxmax, buf.f, dx5tx1, yycon4, dz1, xcr.f, dx5, tz1, bt, c4, dz3tz1, dtty2, dy5ty1, c2dtty1, ce.f, yycon3, dssp, c3c4tz3, vs.f, dz5tz1, dx4, cv.f]) read([verified, comz5, dy3ty1, _imopVarPre2174, qs, lhsx, yycon2, _imopVarPre2166, us, speed, comz4, dttx2, _imopVarPre2173, qs.f, tz2, dy1ty1, _imopVarPre2186, us.f, _imopVarPre2167, c3c4, _imopVarPre2179, speed.f, _imopVarPre731, _imopVarPre2162, lhs.f, square.f, rhs_norm, grid_points.f, zzcon2, _imopVarPre2172, dz2tz1, dx1, sqrt, dt, _imopVarPre835, comz6, i, rhon.f, ninvr, c_print_results, zzcon3, lhs, ty2, dz4tz1, j, txinvr, rhs.f, _imopVarPre721, ainv, dy2ty1, c1, comz1, _imopVarPre2160, printf, rhon, _imopVarPre729, zzcon4, i, _imopVarPre2180, ws, c1c5, dy4ty1, m, rhs, c2, _imopVarPre825, _imopVarPre2161, ainv.f, square, pow, con43, c2dttx1, grid_points, zzcon5, yycon5, x_solve, ws.f, rho_i.f, dx2, u.f, j, i, dz1tz1, rho_i, _imopVarPre719, dttx1, dxmax, _imopVarPre2185, yycon4, _imopVarPre2168, vs, _imopVarPre2178, dx5, xcr.f, cv, u, bt, i, dz3tz1, _imopVarPre827, fabs, dy5ty1, yycon3, dssp, vs.f, _imopVarPre2184, dz5tz1, cv.f]) #pragma omp barrier #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { vijk = vs[i][j][k]; vp1 = vs[i][j + 1][k]; vm1 = vs[i][j - 1][k]; rhs[0][i][j][k] = rhs[0][i][j][k] + dy1ty1 * (u[0][i][j + 1][k] - 2.0 * u[0][i][j][k] + u[0][i][j - 1][k]) - ty2 * (u[2][i][j + 1][k] - u[2][i][j - 1][k]); rhs[1][i][j][k] = rhs[1][i][j][k] + dy2ty1 * (u[1][i][j + 1][k] - 
2.0 * u[1][i][j][k] + u[1][i][j - 1][k]) + yycon2 * (us[i][j + 1][k] - 2.0 * us[i][j][k] + us[i][j - 1][k]) - ty2 * (u[1][i][j + 1][k] * vp1 - u[1][i][j - 1][k] * vm1); rhs[2][i][j][k] = rhs[2][i][j][k] + dy3ty1 * (u[2][i][j + 1][k] - 2.0 * u[2][i][j][k] + u[2][i][j - 1][k]) + yycon2 * con43 * (vp1 - 2.0 * vijk + vm1) - ty2 * (u[2][i][j + 1][k] * vp1 - u[2][i][j - 1][k] * vm1 + (u[4][i][j + 1][k] - square[i][j + 1][k] - u[4][i][j - 1][k] + square[i][j - 1][k]) * c2); rhs[3][i][j][k] = rhs[3][i][j][k] + dy4ty1 * (u[3][i][j + 1][k] - 2.0 * u[3][i][j][k] + u[3][i][j - 1][k]) + yycon2 * (ws[i][j + 1][k] - 2.0 * ws[i][j][k] + ws[i][j - 1][k]) - ty2 * (u[3][i][j + 1][k] * vp1 - u[3][i][j - 1][k] * vm1); rhs[4][i][j][k] = rhs[4][i][j][k] + dy5ty1 * (u[4][i][j + 1][k] - 2.0 * u[4][i][j][k] + u[4][i][j - 1][k]) + yycon3 * (qs[i][j + 1][k] - 2.0 * qs[i][j][k] + qs[i][j - 1][k]) + yycon4 * (vp1 * vp1 - 2.0 * vijk * vijk + vm1 * vm1) + yycon5 * (u[4][i][j + 1][k] * rho_i[i][j + 1][k] - 2.0 * u[4][i][j][k] * rho_i[i][j][k] + u[4][i][j - 1][k] * rho_i[i][j - 1][k]) - ty2 * ((c1 * u[4][i][j + 1][k] - c2 * square[i][j + 1][k]) * vp1 - (c1 * u[4][i][j - 1][k] - c2 * square[i][j - 1][k]) * vm1); } } } j = 1; for (m = 0; m < 5; m++) { #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (k = 1; k <= grid_points[2] - 2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (5.0 * u[m][i][j][k] - 4.0 * u[m][i][j + 1][k] + u[m][i][j + 2][k]); } } } j = 2; for (m = 0; m < 5; m++) { #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (k = 1; k <= grid_points[2] - 2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (-4.0 * u[m][i][j - 1][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j + 1][k] + u[m][i][j + 2][k]); } } } for (m = 0; m < 5; m++) { #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 3 * 1; j <= grid_points[1] - 3 * 1 - 1; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * 
(u[m][i][j - 2][k] - 4.0 * u[m][i][j - 1][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j + 1][k] + u[m][i][j + 2][k]); } } } } j = grid_points[1] - 3; for (m = 0; m < 5; m++) { #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (k = 1; k <= grid_points[2] - 2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j - 2][k] - 4.0 * u[m][i][j - 1][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j + 1][k]); } } } j = grid_points[1] - 2; for (m = 0; m < 5; m++) { #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (k = 1; k <= grid_points[2] - 2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j - 2][k] - 4.0 * u[m][i][j - 1][k] + 5.0 * u[m][i][j][k]); } } } #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { wijk = ws[i][j][k]; wp1 = ws[i][j][k + 1]; wm1 = ws[i][j][k - 1]; rhs[0][i][j][k] = rhs[0][i][j][k] + dz1tz1 * (u[0][i][j][k + 1] - 2.0 * u[0][i][j][k] + u[0][i][j][k - 1]) - tz2 * (u[3][i][j][k + 1] - u[3][i][j][k - 1]); rhs[1][i][j][k] = rhs[1][i][j][k] + dz2tz1 * (u[1][i][j][k + 1] - 2.0 * u[1][i][j][k] + u[1][i][j][k - 1]) + zzcon2 * (us[i][j][k + 1] - 2.0 * us[i][j][k] + us[i][j][k - 1]) - tz2 * (u[1][i][j][k + 1] * wp1 - u[1][i][j][k - 1] * wm1); rhs[2][i][j][k] = rhs[2][i][j][k] + dz3tz1 * (u[2][i][j][k + 1] - 2.0 * u[2][i][j][k] + u[2][i][j][k - 1]) + zzcon2 * (vs[i][j][k + 1] - 2.0 * vs[i][j][k] + vs[i][j][k - 1]) - tz2 * (u[2][i][j][k + 1] * wp1 - u[2][i][j][k - 1] * wm1); rhs[3][i][j][k] = rhs[3][i][j][k] + dz4tz1 * (u[3][i][j][k + 1] - 2.0 * u[3][i][j][k] + u[3][i][j][k - 1]) + zzcon2 * con43 * (wp1 - 2.0 * wijk + wm1) - tz2 * (u[3][i][j][k + 1] * wp1 - u[3][i][j][k - 1] * wm1 + (u[4][i][j][k + 1] - square[i][j][k + 1] - u[4][i][j][k - 1] + square[i][j][k - 1]) * c2); rhs[4][i][j][k] = rhs[4][i][j][k] + dz5tz1 * (u[4][i][j][k + 1] - 2.0 * u[4][i][j][k] + u[4][i][j][k - 1]) + zzcon3 * (qs[i][j][k + 1] - 2.0 * 
qs[i][j][k] + qs[i][j][k - 1]) + zzcon4 * (wp1 * wp1 - 2.0 * wijk * wijk + wm1 * wm1) + zzcon5 * (u[4][i][j][k + 1] * rho_i[i][j][k + 1] - 2.0 * u[4][i][j][k] * rho_i[i][j][k] + u[4][i][j][k - 1] * rho_i[i][j][k - 1]) - tz2 * ((c1 * u[4][i][j][k + 1] - c2 * square[i][j][k + 1]) * wp1 - (c1 * u[4][i][j][k - 1] - c2 * square[i][j][k - 1]) * wm1); } } } k = 1; for (m = 0; m < 5; m++) { #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (5.0 * u[m][i][j][k] - 4.0 * u[m][i][j][k + 1] + u[m][i][j][k + 2]); } } } k = 2; for (m = 0; m < 5; m++) { #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (-4.0 * u[m][i][j][k - 1] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j][k + 1] + u[m][i][j][k + 2]); } } } for (m = 0; m < 5; m++) { #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 3 * 1; k <= grid_points[2] - 3 * 1 - 1; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j][k - 2] - 4.0 * u[m][i][j][k - 1] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j][k + 1] + u[m][i][j][k + 2]); } } } } k = grid_points[2] - 3; for (m = 0; m < 5; m++) { #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j][k - 2] - 4.0 * u[m][i][j][k - 1] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j][k + 1]); } } } k = grid_points[2] - 2; for (m = 0; m < 5; m++) { #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j][k - 2] - 4.0 * u[m][i][j][k - 1] + 5.0 * u[m][i][j][k]); } } } for (m = 0; m < 5; m++) { #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k 
<= grid_points[2] - 2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] * dt; } } } } } } static void set_constants(void ) { ce[0][0] = 2.0; ce[1][0] = 0.0; ce[2][0] = 0.0; ce[3][0] = 4.0; ce[4][0] = 5.0; ce[5][0] = 3.0; ce[6][0] = 0.5; ce[7][0] = 0.02; ce[8][0] = 0.01; ce[9][0] = 0.03; ce[10][0] = 0.5; ce[11][0] = 0.4; ce[12][0] = 0.3; ce[0][1] = 1.0; ce[1][1] = 0.0; ce[2][1] = 0.0; ce[3][1] = 0.0; ce[4][1] = 1.0; ce[5][1] = 2.0; ce[6][1] = 3.0; ce[7][1] = 0.01; ce[8][1] = 0.03; ce[9][1] = 0.02; ce[10][1] = 0.4; ce[11][1] = 0.3; ce[12][1] = 0.5; ce[0][2] = 2.0; ce[1][2] = 2.0; ce[2][2] = 0.0; ce[3][2] = 0.0; ce[4][2] = 0.0; ce[5][2] = 2.0; ce[6][2] = 3.0; ce[7][2] = 0.04; ce[8][2] = 0.03; ce[9][2] = 0.05; ce[10][2] = 0.3; ce[11][2] = 0.5; ce[12][2] = 0.4; ce[0][3] = 2.0; ce[1][3] = 2.0; ce[2][3] = 0.0; ce[3][3] = 0.0; ce[4][3] = 0.0; ce[5][3] = 2.0; ce[6][3] = 3.0; ce[7][3] = 0.03; ce[8][3] = 0.05; ce[9][3] = 0.04; ce[10][3] = 0.2; ce[11][3] = 0.1; ce[12][3] = 0.3; ce[0][4] = 5.0; ce[1][4] = 4.0; ce[2][4] = 3.0; ce[3][4] = 2.0; ce[4][4] = 0.1; ce[5][4] = 0.4; ce[6][4] = 0.3; ce[7][4] = 0.05; ce[8][4] = 0.04; ce[9][4] = 0.03; ce[10][4] = 0.1; ce[11][4] = 0.3; ce[12][4] = 0.2; c1 = 1.4; c2 = 0.4; c3 = 0.1; c4 = 1.0; c5 = 1.4; bt = sqrt(0.5); dnxm1 = 1.0 / (double) (grid_points[0] - 1); dnym1 = 1.0 / (double) (grid_points[1] - 1); dnzm1 = 1.0 / (double) (grid_points[2] - 1); c1c2 = c1 * c2; c1c5 = c1 * c5; c3c4 = c3 * c4; c1345 = c1c5 * c3c4; conz1 = (1.0 - c1c5); tx1 = 1.0 / (dnxm1 * dnxm1); tx2 = 1.0 / (2.0 * dnxm1); tx3 = 1.0 / dnxm1; ty1 = 1.0 / (dnym1 * dnym1); ty2 = 1.0 / (2.0 * dnym1); ty3 = 1.0 / dnym1; tz1 = 1.0 / (dnzm1 * dnzm1); tz2 = 1.0 / (2.0 * dnzm1); tz3 = 1.0 / dnzm1; dx1 = 0.75; dx2 = 0.75; dx3 = 0.75; dx4 = 0.75; dx5 = 0.75; dy1 = 0.75; dy2 = 0.75; dy3 = 0.75; dy4 = 0.75; dy5 = 0.75; dz1 = 1.0; dz2 = 1.0; dz3 = 1.0; dz4 = 1.0; dz5 = 1.0; int _imopVarPre2095; double _imopVarPre2096; _imopVarPre2095 = (dx3 > dx4); if (_imopVarPre2095) { _imopVarPre2096 = 
dx3; } else { _imopVarPre2096 = dx4; } dxmax = _imopVarPre2096; int _imopVarPre2099; double _imopVarPre2100; _imopVarPre2099 = (dy2 > dy4); if (_imopVarPre2099) { _imopVarPre2100 = dy2; } else { _imopVarPre2100 = dy4; } dymax = _imopVarPre2100; int _imopVarPre2103; double _imopVarPre2104; _imopVarPre2103 = (dz2 > dz3); if (_imopVarPre2103) { _imopVarPre2104 = dz2; } else { _imopVarPre2104 = dz3; } dzmax = _imopVarPre2104; int _imopVarPre2145; double _imopVarPre2146; int _imopVarPre2147; double _imopVarPre2148; int _imopVarPre2155; double _imopVarPre2156; _imopVarPre2145 = (dy1 > dz1); if (_imopVarPre2145) { _imopVarPre2146 = dy1; } else { _imopVarPre2146 = dz1; } _imopVarPre2147 = (dx1 > _imopVarPre2146); if (_imopVarPre2147) { _imopVarPre2148 = dx1; } else { _imopVarPre2155 = (dy1 > dz1); if (_imopVarPre2155) { _imopVarPre2156 = dy1; } else { _imopVarPre2156 = dz1; } _imopVarPre2148 = _imopVarPre2156; } dssp = 0.25 * _imopVarPre2148; c4dssp = 4.0 * dssp; c5dssp = 5.0 * dssp; dttx1 = dt * tx1; dttx2 = dt * tx2; dtty1 = dt * ty1; dtty2 = dt * ty2; dttz1 = dt * tz1; dttz2 = dt * tz2; c2dttx1 = 2.0 * dttx1; c2dtty1 = 2.0 * dtty1; c2dttz1 = 2.0 * dttz1; dtdssp = dt * dssp; comz1 = dtdssp; comz4 = 4.0 * dtdssp; comz5 = 5.0 * dtdssp; comz6 = 6.0 * dtdssp; c3c4tx3 = c3c4 * tx3; c3c4ty3 = c3c4 * ty3; c3c4tz3 = c3c4 * tz3; dx1tx1 = dx1 * tx1; dx2tx1 = dx2 * tx1; dx3tx1 = dx3 * tx1; dx4tx1 = dx4 * tx1; dx5tx1 = dx5 * tx1; dy1ty1 = dy1 * ty1; dy2ty1 = dy2 * ty1; dy3ty1 = dy3 * ty1; dy4ty1 = dy4 * ty1; dy5ty1 = dy5 * ty1; dz1tz1 = dz1 * tz1; dz2tz1 = dz2 * tz1; dz3tz1 = dz3 * tz1; dz4tz1 = dz4 * tz1; dz5tz1 = dz5 * tz1; c2iv = 2.5; con43 = 4.0 / 3.0; con16 = 1.0 / 6.0; xxcon1 = c3c4tx3 * con43 * tx3; xxcon2 = c3c4tx3 * tx3; xxcon3 = c3c4tx3 * conz1 * tx3; xxcon4 = c3c4tx3 * con16 * tx3; xxcon5 = c3c4tx3 * c1c5 * tx3; yycon1 = c3c4ty3 * con43 * ty3; yycon2 = c3c4ty3 * ty3; yycon3 = c3c4ty3 * conz1 * ty3; yycon4 = c3c4ty3 * con16 * ty3; yycon5 = c3c4ty3 * c1c5 * ty3; zzcon1 = 
c3c4tz3 * con43 * tz3; /* tail of set_constants: z-direction viscous coefficients */
zzcon2 = c3c4tz3 * tz3;
zzcon3 = c3c4tz3 * conz1 * tz3;
zzcon4 = c3c4tz3 * con16 * tz3;
zzcon5 = c3c4tz3 * c1c5 * tz3;
}
/*
 * txinvr: point-wise transformation of the right-hand side vector.
 * For every interior grid point it reads the cached primitive quantities
 * (rho_i, us, vs, ws, speed, ainv, qs) and rewrites rhs[0..4] in place as a
 * 5x5 block-diagonal matrix-vector product.
 * NOTE(review): presumably this is the multiplication by the inverse of the
 * characteristic-variable transform used by the pentadiagonal solvers --
 * confirm against the solver documentation.
 *
 * The `#pragma omp for` below has no enclosing parallel region in this
 * function: it is an orphaned worksharing construct, so txinvr must be called
 * from inside a parallel region. The function-scope locals declared here are
 * therefore automatically private to each executing thread.
 */
static void txinvr(void ) {
int i;
int j;
int k;
double t1;
double t2;
double t3;
double ac;      /* local sound speed at the point */
double ru1;     /* 1/rho at the point */
double uu;
double vv;
double ww;
double r1;      /* r1..r5: the five rhs components before the transform */
double r2;
double r3;
double r4;
double r5;
double ac2inv;  /* 1/ac^2, built from the cached ainv = 1/ac */
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) {
for (j = 1; j <= grid_points[1] - 2; j++) {
for (k = 1; k <= grid_points[2] - 2; k++) {
ru1 = rho_i[i][j][k];
uu = us[i][j][k];
vv = vs[i][j][k];
ww = ws[i][j][k];
ac = speed[i][j][k];
ac2inv = ainv[i][j][k] * ainv[i][j][k];
/* snapshot all five components first: each output mixes several inputs,
 * so rhs must not be overwritten until r1..r5 are captured */
r1 = rhs[0][i][j][k];
r2 = rhs[1][i][j][k];
r3 = rhs[2][i][j][k];
r4 = rhs[3][i][j][k];
r5 = rhs[4][i][j][k];
t1 = c2 * ac2inv * (qs[i][j][k] * r1 - uu * r2 - vv * r3 - ww * r4 + r5);
t2 = bt * ru1 * (uu * r1 - r2);
t3 = (bt * ru1 * ac) * t1;
rhs[0][i][j][k] = r1 - t1;
rhs[1][i][j][k] = -ru1 * (ww * r1 - r4);
rhs[2][i][j][k] = ru1 * (vv * r1 - r3);
rhs[3][i][j][k] = -t2 + t3;
rhs[4][i][j][k] = t2 + t3;
}
}
}
}
/*
 * tzetar (continues past this chunk boundary): companion point-wise
 * back-transformation of rhs after the z-sweep, again an orphaned
 * worksharing loop that expects an enclosing parallel region.
 * NOTE(review): the private() list below omits acinv; this is harmless
 * because acinv is a function-scope local of a routine executed inside a
 * parallel region and is therefore already private per thread.
 */
static void tzetar(void ) {
int i;
int j;
int k;
double t1;
double t2;
double t3;
double ac;     /* sound speed */
double xvel;   /* cached velocity components */
double yvel;
double zvel;
double r1;     /* rhs components before the transform */
double r2;
double r3;
double r4;
double r5;
double btuz;   /* bt * rho (uzik1) */
double acinv;  /* 1/ac, read from the ainv cache */
double ac2u;   /* ac^2 */
double uzik1;  /* density u[0] at the point */
#pragma omp for private(i, j, k, t1, t2, t3, ac, xvel, yvel, zvel, r1, r2, r3, r4, r5, btuz, ac2u, uzik1) nowait
for (i = 1; i <= grid_points[0] - 2; i++) {
for (j = 1; j <= grid_points[1] - 2; j++) {
for (k = 1; k <= grid_points[2] - 2; k++) {
xvel = us[i][j][k];
yvel = vs[i][j][k];
zvel = ws[i][j][k];
ac = speed[i][j][k];
acinv = ainv[i][j][k];
ac2u = ac * ac;
/* snapshot rhs before overwriting, as in txinvr */
r1 = rhs[0][i][j][k];
r2 = rhs[1][i][j][k];
r3 = rhs[2][i][j][k];
r4 = rhs[3][i][j][k];
r5 = rhs[4][i][j][k];
uzik1 = u[0][i][j][k];
btuz = bt * uzik1;
t1 = btuz * acinv * (r4 + r5);
t2 = r3 + t1;
t3 = btuz * (r4 - r5);
rhs[0][i][j][k] = t2;
rhs[1][i][j][k] = -uzik1 * r2 + xvel * t2;
rhs[2][i][j][k] = uzik1 * r1 + yvel * t2;
rhs[3][i][j][k] = zvel * t2 + t3; rhs[4][i][j][k] = uzik1 * (-xvel * r2 + yvel * r1) + qs[i][j][k] * t2 + c2iv * ac2u * t1 + zvel * t3; } } } } static void verify(int no_time_steps, char *class , boolean *verified) { double xcrref[5]; double xceref[5]; double xcrdif[5]; double xcedif[5]; double epsilon; double xce[5]; double xcr[5]; double dtref; int m; epsilon = 1.0e-08; error_norm(xce); compute_rhs(); rhs_norm(xcr); for (m = 0; m < 5; m++) { xcr[m] = xcr[m] / dt; } *class = 'U'; *verified = 1; for (m = 0; m < 5; m++) { xcrref[m] = 1.0; xceref[m] = 1.0; } int _imopVarPre2160; int _imopVarPre2161; int _imopVarPre2162; _imopVarPre2160 = grid_points[0] == 12; if (_imopVarPre2160) { _imopVarPre2161 = grid_points[1] == 12; if (_imopVarPre2161) { _imopVarPre2162 = grid_points[2] == 12; if (_imopVarPre2162) { _imopVarPre2162 = no_time_steps == 100; } _imopVarPre2161 = _imopVarPre2162; } _imopVarPre2160 = _imopVarPre2161; } if (_imopVarPre2160) { *class = 'S'; dtref = 1.5e-2; xcrref[0] = 2.7470315451339479e-02; xcrref[1] = 1.0360746705285417e-02; xcrref[2] = 1.6235745065095532e-02; xcrref[3] = 1.5840557224455615e-02; xcrref[4] = 3.4849040609362460e-02; xceref[0] = 2.7289258557377227e-05; xceref[1] = 1.0364446640837285e-05; xceref[2] = 1.6154798287166471e-05; xceref[3] = 1.5750704994480102e-05; xceref[4] = 3.4177666183390531e-05; } else { int _imopVarPre2166; int _imopVarPre2167; int _imopVarPre2168; _imopVarPre2166 = grid_points[0] == 36; if (_imopVarPre2166) { _imopVarPre2167 = grid_points[1] == 36; if (_imopVarPre2167) { _imopVarPre2168 = grid_points[2] == 36; if (_imopVarPre2168) { _imopVarPre2168 = no_time_steps == 400; } _imopVarPre2167 = _imopVarPre2168; } _imopVarPre2166 = _imopVarPre2167; } if (_imopVarPre2166) { *class = 'W'; dtref = 1.5e-3; xcrref[0] = 0.1893253733584e-02; xcrref[1] = 0.1717075447775e-03; xcrref[2] = 0.2778153350936e-03; xcrref[3] = 0.2887475409984e-03; xcrref[4] = 0.3143611161242e-02; xceref[0] = 0.7542088599534e-04; xceref[1] = 
0.6512852253086e-05; xceref[2] = 0.1049092285688e-04; xceref[3] = 0.1128838671535e-04; xceref[4] = 0.1212845639773e-03; } else { int _imopVarPre2172; int _imopVarPre2173; int _imopVarPre2174; _imopVarPre2172 = grid_points[0] == 64; if (_imopVarPre2172) { _imopVarPre2173 = grid_points[1] == 64; if (_imopVarPre2173) { _imopVarPre2174 = grid_points[2] == 64; if (_imopVarPre2174) { _imopVarPre2174 = no_time_steps == 400; } _imopVarPre2173 = _imopVarPre2174; } _imopVarPre2172 = _imopVarPre2173; } if (_imopVarPre2172) { *class = 'A'; dtref = 1.5e-3; xcrref[0] = 2.4799822399300195; xcrref[1] = 1.1276337964368832; xcrref[2] = 1.5028977888770491; xcrref[3] = 1.4217816211695179; xcrref[4] = 2.1292113035138280; xceref[0] = 1.0900140297820550e-04; xceref[1] = 3.7343951769282091e-05; xceref[2] = 5.0092785406541633e-05; xceref[3] = 4.7671093939528255e-05; xceref[4] = 1.3621613399213001e-04; } else { int _imopVarPre2178; int _imopVarPre2179; int _imopVarPre2180; _imopVarPre2178 = grid_points[0] == 102; if (_imopVarPre2178) { _imopVarPre2179 = grid_points[1] == 102; if (_imopVarPre2179) { _imopVarPre2180 = grid_points[2] == 102; if (_imopVarPre2180) { _imopVarPre2180 = no_time_steps == 400; } _imopVarPre2179 = _imopVarPre2180; } _imopVarPre2178 = _imopVarPre2179; } if (_imopVarPre2178) { *class = 'B'; dtref = 1.0e-3; xcrref[0] = 0.6903293579998e+02; xcrref[1] = 0.3095134488084e+02; xcrref[2] = 0.4103336647017e+02; xcrref[3] = 0.3864769009604e+02; xcrref[4] = 0.5643482272596e+02; xceref[0] = 0.9810006190188e-02; xceref[1] = 0.1022827905670e-02; xceref[2] = 0.1720597911692e-02; xceref[3] = 0.1694479428231e-02; xceref[4] = 0.1847456263981e-01; } else { int _imopVarPre2184; int _imopVarPre2185; int _imopVarPre2186; _imopVarPre2184 = grid_points[0] == 162; if (_imopVarPre2184) { _imopVarPre2185 = grid_points[1] == 162; if (_imopVarPre2185) { _imopVarPre2186 = grid_points[2] == 162; if (_imopVarPre2186) { _imopVarPre2186 = no_time_steps == 400; } _imopVarPre2185 = _imopVarPre2186; } 
_imopVarPre2184 = _imopVarPre2185; } if (_imopVarPre2184) { *class = 'C'; dtref = 0.67e-3; xcrref[0] = 0.5881691581829e+03; xcrref[1] = 0.2454417603569e+03; xcrref[2] = 0.3293829191851e+03; xcrref[3] = 0.3081924971891e+03; xcrref[4] = 0.4597223799176e+03; xceref[0] = 0.2598120500183e+00; xceref[1] = 0.2590888922315e-01; xceref[2] = 0.5132886416320e-01; xceref[3] = 0.4806073419454e-01; xceref[4] = 0.5483377491301e+00; } else { *verified = 0; } } } } } for (m = 0; m < 5; m++) { double _imopVarPre2188; double _imopVarPre2189; _imopVarPre2188 = (xcr[m] - xcrref[m]) / xcrref[m]; _imopVarPre2189 = fabs(_imopVarPre2188); xcrdif[m] = _imopVarPre2189; double _imopVarPre2191; double _imopVarPre2192; _imopVarPre2191 = (xce[m] - xceref[m]) / xceref[m]; _imopVarPre2192 = fabs(_imopVarPre2191); xcedif[m] = _imopVarPre2192; } if (*class != 'U') { char _imopVarPre2194; _imopVarPre2194 = *class; printf(" Verification being performed for class %1c\n", _imopVarPre2194); printf(" accuracy setting for epsilon = %20.13e\n", epsilon); double _imopVarPre2197; double _imopVarPre2198; _imopVarPre2197 = dt - dtref; _imopVarPre2198 = fabs(_imopVarPre2197); if (_imopVarPre2198 > epsilon) { *verified = 0; *class = 'U'; printf(" DT does not match the reference value of %15.8e\n", dtref); } } else { printf(" Unknown class\n"); } if (*class != 'U') { printf(" Comparison of RMS-norms of residual\n"); } else { printf(" RMS-norms of residual\n"); } for (m = 0; m < 5; m++) { if (*class == 'U') { double _imopVarPre2200; _imopVarPre2200 = xcr[m]; printf(" %2d%20.13e\n", m, _imopVarPre2200); } else { if (xcrdif[m] > epsilon) { *verified = 0; double _imopVarPre2204; double _imopVarPre2205; double _imopVarPre2206; _imopVarPre2204 = xcrdif[m]; _imopVarPre2205 = xcrref[m]; _imopVarPre2206 = xcr[m]; printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n", m, _imopVarPre2206, _imopVarPre2205, _imopVarPre2204); } else { double _imopVarPre2210; double _imopVarPre2211; double _imopVarPre2212; _imopVarPre2210 = xcrdif[m]; 
_imopVarPre2211 = xcrref[m]; _imopVarPre2212 = xcr[m]; printf(" %2d%20.13e%20.13e%20.13e\n", m, _imopVarPre2212, _imopVarPre2211, _imopVarPre2210); } } } if (*class != 'U') { printf(" Comparison of RMS-norms of solution error\n"); } else { printf(" RMS-norms of solution error\n"); } for (m = 0; m < 5; m++) { if (*class == 'U') { double _imopVarPre2214; _imopVarPre2214 = xce[m]; printf(" %2d%20.13e\n", m, _imopVarPre2214); } else { if (xcedif[m] > epsilon) { *verified = 0; double _imopVarPre2218; double _imopVarPre2219; double _imopVarPre2220; _imopVarPre2218 = xcedif[m]; _imopVarPre2219 = xceref[m]; _imopVarPre2220 = xce[m]; printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n", m, _imopVarPre2220, _imopVarPre2219, _imopVarPre2218); } else { double _imopVarPre2224; double _imopVarPre2225; double _imopVarPre2226; _imopVarPre2224 = xcedif[m]; _imopVarPre2225 = xceref[m]; _imopVarPre2226 = xce[m]; printf(" %2d%20.13e%20.13e%20.13e\n", m, _imopVarPre2226, _imopVarPre2225, _imopVarPre2224); } } } if (*class == 'U') { printf(" No reference values provided\n"); printf(" No verification performed\n"); } else { if (*verified) { printf(" Verification Successful\n"); } else { printf(" Verification failed\n"); } } } static void x_solve(void ) { #pragma omp parallel { int i; int j; int k; int n; int i1; int i2; int m; double fac1; double fac2; lhsx(); n = 0; for (i = 0; i <= grid_points[0] - 3; i++) { i1 = i + 1; i2 = i + 2; #pragma omp for nowait for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { fac1 = 1. 
/ lhs[n + 2][i][j][k]; lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k]; lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i][j][k] = fac1 * rhs[m][i][j][k]; } lhs[n + 2][i1][j][k] = lhs[n + 2][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 3][i][j][k]; lhs[n + 3][i1][j][k] = lhs[n + 3][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i1][j][k] = rhs[m][i1][j][k] - lhs[n + 1][i1][j][k] * rhs[m][i][j][k]; } lhs[n + 1][i2][j][k] = lhs[n + 1][i2][j][k] - lhs[n + 0][i2][j][k] * lhs[n + 3][i][j][k]; lhs[n + 2][i2][j][k] = lhs[n + 2][i2][j][k] - lhs[n + 0][i2][j][k] * lhs[n + 4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i2][j][k] = rhs[m][i2][j][k] - lhs[n + 0][i2][j][k] * rhs[m][i][j][k]; } } } // #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, rhs, lhs.f, j, grid_points.f, grid_points, lhs]) #pragma omp barrier } i = grid_points[0] - 2; i1 = grid_points[0] - 1; #pragma omp for nowait for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { fac1 = 1.0 / lhs[n + 2][i][j][k]; lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k]; lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i][j][k] = fac1 * rhs[m][i][j][k]; } lhs[n + 2][i1][j][k] = lhs[n + 2][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 3][i][j][k]; lhs[n + 3][i1][j][k] = lhs[n + 3][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i1][j][k] = rhs[m][i1][j][k] - lhs[n + 1][i1][j][k] * rhs[m][i][j][k]; } fac2 = 1. 
/ lhs[n + 2][i1][j][k]; for (m = 0; m < 3; m++) { rhs[m][i1][j][k] = fac2 * rhs[m][i1][j][k]; } } } // #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, comz1, comz5, dtty1, rhoq.f, j, dy1, c1c5, speed, pinvr, dy5, _imopVarPre1359, _imopVarPre1347, m, rhs, dymax, comz4, con43, grid_points, rhoq, i, c3c4, _imopVarPre1453, rho_i.f, speed.f, i, rho_i, i, lhs.f, grid_points.f, vs, dy3, _imopVarPre1349, _imopVarPre1357, cv, bt, comz6, i, ninvr, dtty2, lhs, c2dtty1, lhsy, y_solve, vs.f, _imopVarPre1455, j, _imopVarPre1463, cv.f]) #pragma omp barrier for (m = 3; m < 5; m++) { n = (m - 3 + 1) * 5; for (i = 0; i <= grid_points[0] - 3; i++) { i1 = i + 1; i2 = i + 2; #pragma omp for nowait for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { fac1 = 1. / lhs[n + 2][i][j][k]; lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k]; lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k]; rhs[m][i][j][k] = fac1 * rhs[m][i][j][k]; lhs[n + 2][i1][j][k] = lhs[n + 2][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 3][i][j][k]; lhs[n + 3][i1][j][k] = lhs[n + 3][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 4][i][j][k]; rhs[m][i1][j][k] = rhs[m][i1][j][k] - lhs[n + 1][i1][j][k] * rhs[m][i][j][k]; lhs[n + 1][i2][j][k] = lhs[n + 1][i2][j][k] - lhs[n + 0][i2][j][k] * lhs[n + 3][i][j][k]; lhs[n + 2][i2][j][k] = lhs[n + 2][i2][j][k] - lhs[n + 0][i2][j][k] * lhs[n + 4][i][j][k]; rhs[m][i2][j][k] = rhs[m][i2][j][k] - lhs[n + 0][i2][j][k] * rhs[m][i][j][k]; } } // #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, rhs, lhs.f, j, grid_points.f, grid_points, lhs]) #pragma omp barrier } i = grid_points[0] - 2; i1 = grid_points[0] - 1; #pragma omp for nowait for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { fac1 = 1. 
/ lhs[n + 2][i][j][k]; lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k]; lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k]; rhs[m][i][j][k] = fac1 * rhs[m][i][j][k]; lhs[n + 2][i1][j][k] = lhs[n + 2][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 3][i][j][k]; lhs[n + 3][i1][j][k] = lhs[n + 3][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 4][i][j][k]; rhs[m][i1][j][k] = rhs[m][i1][j][k] - lhs[n + 1][i1][j][k] * rhs[m][i][j][k]; fac2 = 1. / lhs[n + 2][i1][j][k]; rhs[m][i1][j][k] = fac2 * rhs[m][i1][j][k]; } } // #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, comz1, comz5, dtty1, rhoq.f, j, dy1, c1c5, speed, pinvr, dy5, _imopVarPre1359, _imopVarPre1347, m, rhs, dymax, comz4, con43, grid_points, rhoq, i, c3c4, _imopVarPre1453, rho_i.f, speed.f, i, rho_i, i, lhs.f, grid_points.f, vs, dy3, _imopVarPre1349, _imopVarPre1357, cv, bt, comz6, i, ninvr, dtty2, lhs, c2dtty1, lhsy, y_solve, vs.f, _imopVarPre1455, j, _imopVarPre1463, cv.f]) #pragma omp barrier } i = grid_points[0] - 2; i1 = grid_points[0] - 1; n = 0; for (m = 0; m < 3; m++) { #pragma omp for nowait for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i1][j][k]; } } // #pragma omp dummyFlush BARRIER_START written([rhs.f]) read([rhs.f, comz1, comz5, dtty1, rhoq.f, j, dy1, c1c5, speed, pinvr, dy5, _imopVarPre1359, _imopVarPre1347, m, rhs, dymax, comz4, con43, grid_points, rhoq, i, c3c4, _imopVarPre1453, rho_i.f, speed.f, i, rho_i, i, lhs.f, grid_points.f, vs, dy3, _imopVarPre1349, _imopVarPre1357, cv, bt, comz6, i, ninvr, dtty2, lhs, c2dtty1, lhsy, y_solve, vs.f, _imopVarPre1455, j, _imopVarPre1463, cv.f]) #pragma omp barrier } for (m = 3; m < 5; m++) { #pragma omp for nowait for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { n = (m - 3 + 1) * 5; rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i1][j][k]; } } // #pragma omp dummyFlush 
BARRIER_START written([rhs.f]) read([rhs.f, comz1, comz5, dtty1, rhoq.f, j, dy1, c1c5, speed, pinvr, dy5, _imopVarPre1359, _imopVarPre1347, m, rhs, dymax, comz4, con43, grid_points, rhoq, i, c3c4, _imopVarPre1453, rho_i.f, speed.f, i, rho_i, i, lhs.f, grid_points.f, vs, dy3, _imopVarPre1349, _imopVarPre1357, cv, bt, comz6, i, ninvr, dtty2, lhs, c2dtty1, lhsy, y_solve, vs.f, _imopVarPre1455, j, _imopVarPre1463, cv.f]) #pragma omp barrier } n = 0; for (i = grid_points[0] - 3; i >= 0; i--) { i1 = i + 1; i2 = i + 2; #pragma omp for nowait for (m = 0; m < 3; m++) { for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i1][j][k] - lhs[n + 4][i][j][k] * rhs[m][i2][j][k]; } } } // #pragma omp dummyFlush BARRIER_START written([rhs.f]) read([rhs.f, comz1, comz5, dtty1, rhoq.f, j, dy1, c1c5, speed, pinvr, dy5, _imopVarPre1359, _imopVarPre1347, m, rhs, dymax, comz4, con43, grid_points, rhoq, i, c3c4, _imopVarPre1453, rho_i.f, speed.f, i, rho_i, i, lhs.f, grid_points.f, vs, dy3, _imopVarPre1349, _imopVarPre1357, cv, bt, comz6, i, ninvr, dtty2, lhs, c2dtty1, lhsy, y_solve, vs.f, _imopVarPre1455, j, _imopVarPre1463, cv.f]) #pragma omp barrier } for (m = 3; m < 5; m++) { n = (m - 3 + 1) * 5; for (i = grid_points[0] - 3; i >= 0; i--) { i1 = i + 1; i2 = i + 2; #pragma omp for nowait for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i1][j][k] - lhs[n + 4][i][j][k] * rhs[m][i2][j][k]; } } // #pragma omp dummyFlush BARRIER_START written([rhs.f]) read([rhs.f, i, comz1, rho_i, i, lhs.f, comz5, dtty1, grid_points.f, rhoq.f, vs, j, dy1, c1c5, speed, dy3, pinvr, _imopVarPre1349, dy5, _imopVarPre1359, _imopVarPre1347, _imopVarPre1357, cv, bt, rhs, dymax, comz4, comz6, i, ninvr, dtty2, con43, grid_points, rhoq, lhs, c2dtty1, lhsy, y_solve, vs.f, _imopVarPre1455, i, c3c4, j, _imopVarPre1453, 
rho_i.f, _imopVarPre1463, speed.f, cv.f]) #pragma omp barrier } } } ninvr(); } static void y_solve(void ) { #pragma omp parallel { int i; int j; int k; int n; int j1; int j2; int m; double fac1; double fac2; lhsy(); n = 0; for (j = 0; j <= grid_points[1] - 3; j++) { j1 = j + 1; j2 = j + 2; #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (k = 1; k <= grid_points[2] - 2; k++) { fac1 = 1. / lhs[n + 2][i][j][k]; lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k]; lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i][j][k] = fac1 * rhs[m][i][j][k]; } lhs[n + 2][i][j1][k] = lhs[n + 2][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 3][i][j][k]; lhs[n + 3][i][j1][k] = lhs[n + 3][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i][j1][k] = rhs[m][i][j1][k] - lhs[n + 1][i][j1][k] * rhs[m][i][j][k]; } lhs[n + 1][i][j2][k] = lhs[n + 1][i][j2][k] - lhs[n + 0][i][j2][k] * lhs[n + 3][i][j][k]; lhs[n + 2][i][j2][k] = lhs[n + 2][i][j2][k] - lhs[n + 0][i][j2][k] * lhs[n + 4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i][j2][k] = rhs[m][i][j2][k] - lhs[n + 0][i][j2][k] * rhs[m][i][j][k]; } } } // #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, i, rhs, lhs.f, grid_points.f, grid_points, lhs]) #pragma omp barrier } j = grid_points[1] - 2; j1 = grid_points[1] - 1; #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (k = 1; k <= grid_points[2] - 2; k++) { fac1 = 1. / lhs[n + 2][i][j][k]; lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k]; lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i][j][k] = fac1 * rhs[m][i][j][k]; } lhs[n + 2][i][j1][k] = lhs[n + 2][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 3][i][j][k]; lhs[n + 3][i][j1][k] = lhs[n + 3][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i][j1][k] = rhs[m][i][j1][k] - lhs[n + 1][i][j1][k] * rhs[m][i][j][k]; } fac2 = 1. 
/ lhs[n + 2][i][j1][k]; for (m = 0; m < 3; m++) { rhs[m][i][j1][k] = fac2 * rhs[m][i][j1][k]; } } } // #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, ainv, k, comz1, comz5, qs, us, ws, c1c5, speed, pinvr, rhs, ainv.f, comz4, dttz2, dzmax, con43, grid_points, c2dttz1, qs.f, z_solve, _imopVarPre1987, _imopVarPre1975, us.f, ws.f, _imopVarPre2081, c3c4, rho_i.f, speed.f, i, u.f, dttz1, rho_i, i, i, lhs.f, tzetar, grid_points.f, rhos.f, lhsz, dz1, i, vs, dz5, c2iv, cv, bt, u, comz6, lhs, rhos, _imopVarPre1977, vs.f, _imopVarPre1985, _imopVarPre2083, dz4, _imopVarPre2091, cv.f, i]) #pragma omp barrier for (m = 3; m < 5; m++) { n = (m - 3 + 1) * 5; for (j = 0; j <= grid_points[1] - 3; j++) { j1 = j + 1; j2 = j + 2; #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (k = 1; k <= grid_points[2] - 2; k++) { fac1 = 1. / lhs[n + 2][i][j][k]; lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k]; lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k]; rhs[m][i][j][k] = fac1 * rhs[m][i][j][k]; lhs[n + 2][i][j1][k] = lhs[n + 2][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 3][i][j][k]; lhs[n + 3][i][j1][k] = lhs[n + 3][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 4][i][j][k]; rhs[m][i][j1][k] = rhs[m][i][j1][k] - lhs[n + 1][i][j1][k] * rhs[m][i][j][k]; lhs[n + 1][i][j2][k] = lhs[n + 1][i][j2][k] - lhs[n + 0][i][j2][k] * lhs[n + 3][i][j][k]; lhs[n + 2][i][j2][k] = lhs[n + 2][i][j2][k] - lhs[n + 0][i][j2][k] * lhs[n + 4][i][j][k]; rhs[m][i][j2][k] = rhs[m][i][j2][k] - lhs[n + 0][i][j2][k] * rhs[m][i][j][k]; } } // #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, i, rhs, lhs.f, grid_points.f, grid_points, lhs]) #pragma omp barrier } j = grid_points[1] - 2; j1 = grid_points[1] - 1; #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (k = 1; k <= grid_points[2] - 2; k++) { fac1 = 1. 
/ lhs[n + 2][i][j][k]; lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k]; lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k]; rhs[m][i][j][k] = fac1 * rhs[m][i][j][k]; lhs[n + 2][i][j1][k] = lhs[n + 2][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 3][i][j][k]; lhs[n + 3][i][j1][k] = lhs[n + 3][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 4][i][j][k]; rhs[m][i][j1][k] = rhs[m][i][j1][k] - lhs[n + 1][i][j1][k] * rhs[m][i][j][k]; fac2 = 1. / lhs[n + 2][i][j1][k]; rhs[m][i][j1][k] = fac2 * rhs[m][i][j1][k]; } } // #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, ainv, k, comz1, comz5, qs, us, ws, c1c5, speed, pinvr, rhs, ainv.f, comz4, dttz2, dzmax, con43, grid_points, c2dttz1, qs.f, z_solve, _imopVarPre1987, _imopVarPre1975, us.f, ws.f, _imopVarPre2081, c3c4, rho_i.f, speed.f, i, u.f, dttz1, rho_i, i, i, lhs.f, tzetar, grid_points.f, rhos.f, lhsz, dz1, i, vs, dz5, c2iv, cv, bt, u, comz6, lhs, rhos, _imopVarPre1977, vs.f, _imopVarPre1985, _imopVarPre2083, dz4, _imopVarPre2091, cv.f, i]) #pragma omp barrier } j = grid_points[1] - 2; j1 = grid_points[1] - 1; n = 0; for (m = 0; m < 3; m++) { #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (k = 1; k <= grid_points[2] - 2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j1][k]; } } // #pragma omp dummyFlush BARRIER_START written([rhs.f]) read([rhs.f, k, ainv, comz1, comz5, qs, us, ws, c1c5, speed, pinvr, rhs, ainv.f, comz4, dttz2, dzmax, con43, grid_points, c2dttz1, qs.f, z_solve, _imopVarPre1987, _imopVarPre1975, us.f, ws.f, _imopVarPre2081, c3c4, rho_i.f, speed.f, i, u.f, rho_i, dttz1, i, i, lhs.f, tzetar, grid_points.f, rhos.f, lhsz, dz1, i, vs, dz5, c2iv, cv, bt, u, comz6, lhs, rhos, _imopVarPre1977, vs.f, _imopVarPre1985, dz4, _imopVarPre2083, _imopVarPre2091, cv.f, i]) #pragma omp barrier } for (m = 3; m < 5; m++) { #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (k = 1; k <= grid_points[2] - 2; k++) { n = (m - 3 + 1) * 5; 
rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j1][k]; } } // #pragma omp dummyFlush BARRIER_START written([rhs.f]) read([rhs.f, k, ainv, comz1, comz5, qs, us, ws, c1c5, speed, pinvr, rhs, ainv.f, comz4, dttz2, dzmax, con43, grid_points, c2dttz1, qs.f, z_solve, _imopVarPre1987, _imopVarPre1975, us.f, ws.f, _imopVarPre2081, c3c4, rho_i.f, speed.f, i, u.f, rho_i, dttz1, i, i, lhs.f, tzetar, grid_points.f, rhos.f, lhsz, dz1, i, vs, dz5, c2iv, cv, bt, u, comz6, lhs, rhos, _imopVarPre1977, vs.f, _imopVarPre1985, dz4, _imopVarPre2083, _imopVarPre2091, cv.f, i]) #pragma omp barrier } n = 0; for (m = 0; m < 3; m++) { for (j = grid_points[1] - 3; j >= 0; j--) { j1 = j + 1; j2 = j + 2; #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (k = 1; k <= grid_points[2] - 2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j1][k] - lhs[n + 4][i][j][k] * rhs[m][i][j2][k]; } } // #pragma omp dummyFlush BARRIER_START written([rhs.f]) read([rhs.f, k, ainv, comz1, comz5, qs, us, ws, c1c5, speed, pinvr, rhs, ainv.f, comz4, dttz2, dzmax, con43, grid_points, c2dttz1, qs.f, z_solve, _imopVarPre1987, _imopVarPre1975, us.f, ws.f, _imopVarPre2081, c3c4, rho_i.f, speed.f, i, u.f, rho_i, dttz1, i, i, lhs.f, tzetar, grid_points.f, rhos.f, lhsz, dz1, i, vs, dz5, c2iv, cv, bt, u, comz6, lhs, rhos, _imopVarPre1977, vs.f, _imopVarPre1985, dz4, _imopVarPre2083, _imopVarPre2091, cv.f, i]) #pragma omp barrier } } for (m = 3; m < 5; m++) { n = (m - 3 + 1) * 5; for (j = grid_points[1] - 3; j >= 0; j--) { j1 = j + 1; j2 = j1 + 1; #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (k = 1; k <= grid_points[2] - 2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j1][k] - lhs[n + 4][i][j][k] * rhs[m][i][j2][k]; } } // #pragma omp dummyFlush BARRIER_START written([rhs.f]) read([rhs.f, k, ainv, comz1, comz5, qs, us, ws, c1c5, speed, pinvr, rhs, ainv.f, comz4, dttz2, dzmax, con43, grid_points, 
// (continuation of a tool-generated dependence annotation from the previous
// line; the full written/read variable lists are condensed here — they
// summarize which arrays the following barrier synchronizes)
#pragma omp barrier
            }
        }
    }
    /* back substitution done for y: apply the block-diagonal inversion */
    pinvr();
}

/*
 * z_solve: solves the block-tridiagonal systems arising from the z-direction
 * factorization of the approximately factored scheme.  For every (i,j) pencil
 * it runs a scalar Thomas algorithm along k: forward elimination followed by
 * back substitution.  The first three rhs components (m = 0..2) share one set
 * of lhs coefficients (n = 0); the last two (m = 3, 4) each use their own set
 * at offset n = (m - 3 + 1) * 5.  lhsz() fills the lhs coefficients and
 * tzetar() applies the block-diagonal matrix multiplication afterwards.
 * The nowait/barrier pattern lets each worksharing loop finish independently
 * while still ordering the dependent sweeps.
 */
static void z_solve(void ) {
#pragma omp parallel
    {
        int i;
        int j;
        int k;
        int n;
        int k1;
        int k2;
        int m;
        double fac1;
        double fac2;
        lhsz();
        /* ---- forward elimination for the first three factors (n = 0) ---- */
        n = 0;
#pragma omp for nowait
        for (i = 1; i <= grid_points[0] - 2; i++) {
            for (j = 1; j <= grid_points[1] - 2; j++) {
                for (k = 0; k <= grid_points[2] - 3; k++) {
                    k1 = k + 1;
                    k2 = k + 2;
                    fac1 = 1. / lhs[n + 2][i][j][k];
                    lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];
                    lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];
                    for (m = 0; m < 3; m++) {
                        rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];
                    }
                    lhs[n + 2][i][j][k1] = lhs[n + 2][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 3][i][j][k];
                    lhs[n + 3][i][j][k1] = lhs[n + 3][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 4][i][j][k];
                    for (m = 0; m < 3; m++) {
                        rhs[m][i][j][k1] = rhs[m][i][j][k1] - lhs[n + 1][i][j][k1] * rhs[m][i][j][k];
                    }
                    lhs[n + 1][i][j][k2] = lhs[n + 1][i][j][k2] - lhs[n + 0][i][j][k2] * lhs[n + 3][i][j][k];
                    lhs[n + 2][i][j][k2] = lhs[n + 2][i][j][k2] - lhs[n + 0][i][j][k2] * lhs[n + 4][i][j][k];
                    for (m = 0; m < 3; m++) {
                        rhs[m][i][j][k2] = rhs[m][i][j][k2] - lhs[n + 0][i][j][k2] * rhs[m][i][j][k];
                    }
                }
            }
        }
        // (tool-generated dependence annotation elided: barrier orders the
        //  rhs/lhs writes above against the boundary sweep below)
#pragma omp barrier
        /* ---- eliminate the last two planes (k = K-2, K-1) separately ---- */
        k = grid_points[2] - 2;
        k1 = grid_points[2] - 1;
#pragma omp for nowait
        for (i = 1; i <= grid_points[0] - 2; i++) {
            for (j = 1; j <= grid_points[1] - 2; j++) {
                fac1 = 1. / lhs[n + 2][i][j][k];
                lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];
                lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];
                for (m = 0; m < 3; m++) {
                    rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];
                }
                lhs[n + 2][i][j][k1] = lhs[n + 2][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 3][i][j][k];
                lhs[n + 3][i][j][k1] = lhs[n + 3][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 4][i][j][k];
                for (m = 0; m < 3; m++) {
                    rhs[m][i][j][k1] = rhs[m][i][j][k1] - lhs[n + 1][i][j][k1] * rhs[m][i][j][k];
                }
                fac2 = 1. / lhs[n + 2][i][j][k1];
                for (m = 0; m < 3; m++) {
                    rhs[m][i][j][k1] = fac2 * rhs[m][i][j][k1];
                }
            }
        }
        // (tool-generated dependence annotation elided)
#pragma omp barrier
        /* ---- same elimination for the last two factors (m = 3, 4) ---- */
        for (m = 3; m < 5; m++) {
            n = (m - 3 + 1) * 5;
#pragma omp for nowait
            for (i = 1; i <= grid_points[0] - 2; i++) {
                for (j = 1; j <= grid_points[1] - 2; j++) {
                    for (k = 0; k <= grid_points[2] - 3; k++) {
                        k1 = k + 1;
                        k2 = k + 2;
                        fac1 = 1. / lhs[n + 2][i][j][k];
                        lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];
                        lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];
                        rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];
                        lhs[n + 2][i][j][k1] = lhs[n + 2][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 3][i][j][k];
                        lhs[n + 3][i][j][k1] = lhs[n + 3][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 4][i][j][k];
                        rhs[m][i][j][k1] = rhs[m][i][j][k1] - lhs[n + 1][i][j][k1] * rhs[m][i][j][k];
                        lhs[n + 1][i][j][k2] = lhs[n + 1][i][j][k2] - lhs[n + 0][i][j][k2] * lhs[n + 3][i][j][k];
                        lhs[n + 2][i][j][k2] = lhs[n + 2][i][j][k2] - lhs[n + 0][i][j][k2] * lhs[n + 4][i][j][k];
                        rhs[m][i][j][k2] = rhs[m][i][j][k2] - lhs[n + 0][i][j][k2] * rhs[m][i][j][k];
                    }
                }
            }
            // (tool-generated dependence annotation elided)
#pragma omp barrier
            k = grid_points[2] - 2;
            k1 = grid_points[2] - 1;
#pragma omp for nowait
            for (i = 1; i <= grid_points[0] - 2; i++) {
                for (j = 1; j <= grid_points[1] - 2; j++) {
                    fac1 = 1. / lhs[n + 2][i][j][k];
                    lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];
                    lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];
                    rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];
                    lhs[n + 2][i][j][k1] = lhs[n + 2][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 3][i][j][k];
                    lhs[n + 3][i][j][k1] = lhs[n + 3][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 4][i][j][k];
                    rhs[m][i][j][k1] = rhs[m][i][j][k1] - lhs[n + 1][i][j][k1] * rhs[m][i][j][k];
                    fac2 = 1. / lhs[n + 2][i][j][k1];
                    rhs[m][i][j][k1] = fac2 * rhs[m][i][j][k1];
                }
            }
            // (tool-generated dependence annotation elided)
#pragma omp barrier
        }
        /* ---- back substitution: second-to-last plane first ---- */
        k = grid_points[2] - 2;
        k1 = grid_points[2] - 1;
        n = 0;
        for (m = 0; m < 3; m++) {
#pragma omp for nowait
            for (i = 1; i <= grid_points[0] - 2; i++) {
                for (j = 1; j <= grid_points[1] - 2; j++) {
                    rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j][k1];
                }
            }
            // (tool-generated dependence annotation elided)
#pragma omp barrier
        }
        for (m = 3; m < 5; m++) {
            n = (m - 3 + 1) * 5;
#pragma omp for nowait
            for (i = 1; i <= grid_points[0] - 2; i++) {
                for (j = 1; j <= grid_points[1] - 2; j++) {
                    rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j][k1];
                }
            }
            // (tool-generated dependence annotation elided)
#pragma omp barrier
        }
        /* ---- full back-substitution sweep, k descending ---- */
        n = 0;
        for (m = 0; m < 3; m++) {
#pragma omp for nowait
            for (i = 1; i <= grid_points[0] - 2; i++) {
                for (j = 1; j <= grid_points[1] - 2; j++) {
                    for (k = grid_points[2] - 3; k >= 0; k--) {
                        k1 = k + 1;
                        k2 = k + 2;
                        rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j][k1] - lhs[n + 4][i][j][k] * rhs[m][i][j][k2];
                    }
                }
            }
            // (tool-generated dependence annotation elided)
#pragma omp barrier
        }
        for (m = 3; m < 5; m++) {
            n = (m - 3 + 1) * 5;
#pragma omp for nowait
            for (i = 1; i <= grid_points[0] - 2; i++) {
                for (j = 1; j <= grid_points[1] - 2; j++) {
                    for (k = grid_points[2] - 3; k >= 0; k--) {
                        k1 = k + 1;
                        k2 = k + 2;
                        rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j][k1] - lhs[n + 4][i][j][k] * rhs[m][i][j][k2];
                    }
                }
            }
            // (tool-generated dependence annotation elided)
#pragma omp barrier
        }
    }
    /* apply the block-diagonal matrix-vector multiplication */
    tzetar();
}
ccsd_grad.c
/* * Author: Qiming Sun <osirpt.sun@gmail.com> * */ #include <stdlib.h> #include <string.h> //#include <omp.h> #include "config.h" #include "vhf/fblas.h" #include "ao2mo/nr_ao2mo.h" #define OUTPUTIJ 1 #define INPUT_IJ 2 /* * a = reduce(numpy.dot, (mo_coeff, vin, mo_coeff.T)) * numpy.tril(a + a.T) */ int CCmmm_transpose_sum(double *vout, double *vin, double *buf, struct _AO2MOEnvs *envs, int seekdim) { switch (seekdim) { case OUTPUTIJ: return envs->nao * (envs->nao + 1) / 2; case INPUT_IJ: return envs->bra_count * envs->ket_count; } const double D0 = 0; const double D1 = 1; const char TRANS_T = 'T'; const char TRANS_N = 'N'; int nao = envs->nao; int i_start = envs->bra_start; int i_count = envs->bra_count; int j_start = envs->ket_start; int j_count = envs->ket_count; int i, j, ij; double *mo_coeff = envs->mo_coeff; // in Fortran order double *buf1 = buf + nao*j_count; dgemm_(&TRANS_N, &TRANS_T, &j_count, &nao, &i_count, &D1, vin, &j_count, mo_coeff+i_start*nao, &nao, &D0, buf, &j_count); dgemm_(&TRANS_N, &TRANS_N, &nao, &nao, &j_count, &D1, mo_coeff+j_start*nao, &nao, buf, &j_count, &D0, buf1, &nao); for (ij = 0, i = 0; i < nao; i++) { for (j = 0; j <= i; j++, ij++) { vout[ij] = buf1[i*nao+j] + buf1[j*nao+i]; } } return 0; } /* * for (ij|kl) == (ij|lk), in lower triangle kl * (ij|kl),lk->ij * (ij|kl),jk->il */ void CVHFics2kl_kl_s1ij(double *eri, double *dm, double *vj, int nao, int ic, int jc); void CVHFics2kl_jk_s1il(double *eri, double *dm, double *vk, int nao, int ic, int jc); void CCvhfs2kl(double *eri, double *dm, double *vj, double *vk, int ni, int nj) { const int npair = nj*(nj+1)/2; int i, j; size_t ij, off; memset(vj, 0, sizeof(double)*ni*nj); memset(vk, 0, sizeof(double)*ni*nj); #pragma omp parallel default(none) \ shared(eri, dm, vj, vk, ni, nj) \ private(ij, i, j, off) { double *vj_priv = malloc(sizeof(double)*ni*nj); double *vk_priv = malloc(sizeof(double)*ni*nj); memset(vj_priv, 0, sizeof(double)*ni*nj); memset(vk_priv, 0, sizeof(double)*ni*nj); 
#pragma omp for nowait schedule(dynamic, 4) for (ij = 0; ij < ni*nj; ij++) { i = ij / nj; j = ij - i * nj; off = ij * npair; CVHFics2kl_kl_s1ij(eri+off, dm, vj_priv, nj, i, j); CVHFics2kl_jk_s1il(eri+off, dm, vk_priv, nj, i, j); } #pragma omp critical { for (i = 0; i < ni*nj; i++) { vj[i] += vj_priv[i]; vk[i] += vk_priv[i]; } } free(vj_priv); free(vk_priv); } }
lowp_basic.c
#include <string.h>
#include <math.h>
#include "../thnets.h"

/*
 * Quantize a float tensor into an 8-bit "lowp" tensor.
 * Returns a new tensor header that shares the shape of t but whose storage
 * holds one unsigned byte per element.  The affine mapping is
 *     q = round(v * mult) - round(min * mult),  mult = 255 / (max - min),
 * so min maps to 0 and max to 255.  The dequantization parameters are
 * recorded in n->sub (= min) and n->mult.
 * NOTE(review): min/max start at 0, so the range is always widened to
 * include 0 — presumably intentional so that 0.0 is exactly representable.
 */
THFloatTensor *THLowpTensor_newFromFloatTensor(THFloatTensor *t)
{
	THFloatTensor *n = malloc(sizeof(*n));
	memcpy(n, t, sizeof(*n));
	if(t->storage)
	{
		n->storage = malloc(sizeof(*n->storage));
		n->storage->nref = 1;
		n->storage->mustfree = 1;
		n->storageOffset = 0;
		int len = THFloatTensor_nElement(t) ;
		n->storage->data = malloc(len); /* one byte per element */
		float *buf = THFloatTensor_data(t);
		float min = 0, max = 0, mult;
		int i;
		for(i = 0; i < len; i++)
		{
			if(buf[i] < min)
				min = buf[i];
			if(buf[i] > max)
				max = buf[i];
		}
		if(max - min > 0)
			mult = 255.0 / (max - min);
		else mult = 0; /* constant tensor: everything quantizes to 0 */
		unsigned char *dst = (unsigned char *)n->storage->data;
		for(i = 0; i < len; i++)
			//dst[i] = roundf((buf[i] - min) * mult);
			dst[i] = roundf(buf[i] * mult) - roundf(min*mult);
		n->sub = min;
		n->mult = mult;
	}
	return n;
}

/*
 * Dequantize a lowp tensor back to floats: v = q / mult + sub.
 * Inverse of THLowpTensor_newFromFloatTensor; allocates fresh float storage.
 */
THFloatTensor *THFloatTensor_newFromLowpTensor(THFloatTensor *t)
{
	THFloatTensor *n = malloc(sizeof(*n));
	memcpy(n, t, sizeof(*n));
	if(t->storage)
	{
		n->storage = malloc(sizeof(*n->storage));
		n->storage->nref = 1;
		n->storage->mustfree = 1;
		n->storageOffset = 0;
		int i, len = THFloatTensor_nElement(t) ;
		n->storage->data = (float *)malloc(len * sizeof(*n->storage->data));
		unsigned char *buf = (unsigned char *)THFloatTensor_data(t);
		float invmult = t->mult ? 1.0 / t->mult : 0; /* guard div-by-zero for constant tensors */
		for(i = 0; i < len; i++)
			n->storage->data[i] = buf[i] * invmult + t->sub;
	}
	return n;
}

/*
 * Convert one interleaved RGB image (HWC, 3 bytes/pixel, row stride
 * srcstride) into a planar quantized CHW byte tensor, applying a per-channel
 * affine normalization: dst = round((src - sub[c]) * mult[c]).
 */
static void rgb2tensord(unsigned char *dst, const unsigned char *src, int width, int height, int srcstride, const int *sub, const float *mult)
{
	int c, i, j;
#pragma omp parallel for private(c, i, j)
	for(c = 0; c < 3; c++)
		for(i = 0; i < height; i++)
			for(j = 0; j < width; j++)
				dst[j + (i + c * height) * width] = roundf((src[c + 3*j + srcstride*i] - sub[c]) * mult[c]);
}

/*
 * Same as rgb2tensord but the source pixels are BGR-ordered
 * (note the 2-c channel index).
 */
static void bgr2tensord(unsigned char *dst, const unsigned char *src, int width, int height, int srcstride, const int *sub, const float *mult)
{
	int c, i, j;
#pragma omp parallel for private(c, i, j)
	for(c = 0; c < 3; c++)
		for(i = 0; i < height; i++)
			for(j = 0; j < width; j++)
				dst[j + (i + c * height) * width] = roundf((src[2-c + 3*j + srcstride*i] - sub[c]) * mult[c]);
}

/*
 * Load nimages 8-bit images into a single quantized (nimages,3,height,width)
 * lowp tensor, applying the usual (pixel/255 - mean[c]) / std[c]
 * normalization directly in the integer domain.  The global quantization
 * range [min, max] is derived from the per-channel extremes of the
 * normalized values, and per-channel integer offsets sub[] / scales mult[]
 * are precomputed so each pixel needs one subtract and one multiply.
 */
THFloatTensor *Lowp_LoadImages(unsigned char **src, int nimages, int width, int height, int srcstride, const float *mean, const float *std, int bgr)
{
	int i, sub[3];
	float mult[3], min, max;
	THFloatTensor *n = malloc(sizeof(*n));
	n->nDimension = 4;
	n->size[0] = nimages;
	n->size[1] = 3;
	n->size[2] = height;
	n->size[3] = width;
	n->stride[3] = 1;
	n->stride[2] = width;
	n->stride[1] = width * height;
	n->stride[0] = width * height * 3;
	n->storageOffset = 0;
	n->storage = (THFloatStorage *)malloc(sizeof(*n->storage));
	n->storage->nref = 1;
	n->storage->mustfree = 1;
	n->storage->data = (float *)malloc(nimages * n->stride[0]); /* 1 byte/elem: lowp storage */
	/* smallest/largest normalized values reachable from pixel range [0,1] */
	min = 1e30;
	max = -1e30;
	for(i = 0; i < 3; i++)
	{
		if(-mean[i] / std[i] < min)
			min = -mean[i] / std[i];
		if((1-mean[i]) / std[i] > max)
			max = (1-mean[i]) / std[i];
	}
	n->sub = min;
	if(max - min)
		n->mult = 255 / (max - min);
	else n->mult = 0;
	for(i = 0; i < 3; i++)
	{
		/* integer pixel offset and combined scale for the per-pixel affine map */
		sub[i] = roundf(255 * (mean[i] + std[i] * n->sub));
		mult[i] = n->mult / (255 * std[i]);
	}
	if(bgr)
	{
#pragma omp parallel for if(nimages>1) private(i)
		for(i = 0; i < nimages; i++)
			bgr2tensord((unsigned char *)n->storage->data + i * width * height * 3,
				src[i], width, height, srcstride, sub, mult);
	} else {
#pragma omp parallel for if(nimages>1) private(i)
		for(i = 0; i < nimages; i++)
			rgb2tensord((unsigned char *)n->storage->data + i * width * height * 3,
				src[i], width, height, srcstride, sub, mult);
	}
	return n;
}

/*
 * Build a lowp (8-bit quantized) copy of a float network.  Every module's
 * output tensor is given the fixed quantization range [-range/2, +range/2];
 * weights and biases of (de facto) convolution layers are quantized from
 * their float values.  Linear layers are rewritten as 1x1 convolutions.
 * Unsupported module types raise THError.
 * NOTE(review): the MT_Dropout case only validates v2 and keeps the
 * memcpy'd float updateOutput — presumably v2 dropout is a no-op at
 * inference time; confirm against the float engine.
 */
struct network *THLowp_ToLowp(struct network *net, float range)
{
	int i;
	struct network *nn = malloc(sizeof(*nn));
	float sub = -range / 2;
	float mult = 255 / range;
	nn->nelem = net->nelem;
	nn->modules = malloc(sizeof(net->modules[0]) * net->nelem);
	nn->engine = ENGINE_LOWP;
	memcpy(nn->modules, net->modules, sizeof(net->modules[0]) * net->nelem);
	for(i = 0; i < net->nelem; i++)
	{
		nn->modules[i].output = THLowpTensor_newFromFloatTensor(net->modules[i].output);
		/* force the fixed activation quantization range on every output */
		nn->modules[i].output->mult = mult;
		nn->modules[i].output->sub = sub;
		nn->modules[i].net = nn;
		switch(net->modules[i].type)
		{
		case MT_SpatialConvolutionMM:
		case MT_SpatialConvolution:
		case MT_SpatialConvolutionVirtMM:
			nn->modules[i].updateOutput = Lowp_SpatialConvolution_updateOutput;
			nn->modules[i].SpatialConvolution.weight = THLowpTensor_newFromFloatTensor(net->modules[i].SpatialConvolution.weight);
			nn->modules[i].SpatialConvolution.bias = THLowpTensor_newFromFloatTensor(net->modules[i].SpatialConvolution.bias);
			nn->modules[i].SpatialConvolution.finput = THFloatTensor_new();
			break;
		case MT_SpatialMaxPooling:
			nn->modules[i].SpatialMaxPooling.indices = 0;
			nn->modules[i].updateOutput = Lowp_SpatialMaxPooling_updateOutput;
			break;
		case MT_SpatialMaxUnpooling:
			THError("MT_SpatialMaxUnpooling not supported in Lowp");
			break;
		case MT_Threshold:
			nn->modules[i].updateOutput = Lowp_Threshold_updateOutput;
			break;
		case MT_SoftMax:
			nn->modules[i].updateOutput = Lowp_SoftMax_updateOutput;
			break;
		case MT_Dropout:
			if(!nn->modules[i].Dropout.v2)
				THError("Non v2 dropout not supported in Lowp");
			break;
		case MT_SpatialZeroPadding:
			THError("SpatialZeroPadding not supported in Lowp");
			break;
		case MT_Linear:
			/* rewrite a fully-connected layer as an equivalent 1x1 convolution */
			nn->modules[i].type = MT_SpatialConvolutionMM;
			nn->modules[i].updateOutput = Lowp_SpatialConvolution_updateOutput;
			struct SpatialConvolution *c = &nn->modules[i].SpatialConvolution;
			c->finput = 0;
			c->padW = c->padH = 0;
			c->dW = c->dH = 1;
			c->kW = c->kH = 1;
			c->nOutputPlane = c->weight->size[0];
			c->nInputPlane = c->weight->size[1];
			nn->modules[i].SpatialConvolution.weight = THLowpTensor_newFromFloatTensor(net->modules[i].SpatialConvolution.weight);
			nn->modules[i].SpatialConvolution.bias = THLowpTensor_newFromFloatTensor(net->modules[i].SpatialConvolution.bias);
			nn->modules[i].SpatialConvolution.finput = THFloatTensor_new();
			break;
		case MT_SpatialBatchNormalization:
			THError("MT_SpatialBatchNormalization not supported in Lowp");
			break;
		case MT_SpatialFullConvolution:
			THError("MT_SpatialFullConvolution not supported in Lowp");
			break;
		case MT_SpatialAveragePooling:
			THError("MT_SpatialAveragePooling not supported in lowp");
			break;
		case MT_Sequential:
			THError("MT_Sequential not supported in lowp");
			break;
		case MT_Concat:
			THError("MT_Concat not supported in lowp");
			break;
		}
	}
	return nn;
}

/*
 * Quantize a single float with tensor t's affine parameters,
 * saturating to the representable byte range [0, 255].
 */
unsigned char THLowp_ScaleFloat(THFloatTensor *t, float value)
{
	float scaled = (value - t->sub) * t->mult;
	if(scaled < 0)
		return 0;
	if(scaled > 255)
		return 255;
	return (unsigned char)scaled;
}

/* Allocate a lowp storage of `size` BYTES (one byte per element). */
THFloatStorage *THLowpStorage_new(long size)
{
	THFloatStorage *s = malloc(sizeof(*s));
	s->data = malloc(size);
	if(!s->data)
		THError("Out of memory");
	s->nref = 1;
	s->mustfree = 1;
	return s;
}

/*
 * Make tdst's shape/strides match tsrc and (re)allocate its byte storage if
 * the element count changed.  Element count equals byte count for lowp data.
 * NOTE(review): the realloc result is not checked for NULL here — presumably
 * acceptable per project policy, but worth confirming.
 */
void THLowpTensor_resizeAs(THFloatTensor *tdst, THFloatTensor *tsrc)
{
	if(tsrc == tdst)
		return;
	long nelemsrc = THFloatTensor_nElement(tsrc);
	long nelemdst = THFloatTensor_nElement(tdst);
	tdst->nDimension = tsrc->nDimension;
	memcpy(tdst->size, tsrc->size, sizeof(tsrc->size));
	memcpy(tdst->stride, tsrc->stride, sizeof(tsrc->stride));
	if(nelemsrc != nelemdst)
	{
		if(tdst->storage)
			tdst->storage->data = realloc(tdst->storage->data, nelemsrc);
		else tdst->storage = THLowpStorage_new(nelemsrc);
	}
}

/* Resize a lowp tensor to a contiguous 4D shape, reallocating byte storage if needed. */
void THLowpTensor_resize4d(THFloatTensor *t, long size0, long size1, long size2, long size3)
{
	long nElement = THFloatTensor_nElement(t);
	t->nDimension = 4;
	t->size[0] = size0;
	t->size[1] = size1;
	t->size[2] = size2;
	t->size[3] = size3;
	t->stride[3] = 1;
	t->stride[2] = size3;
	t->stride[1] = size2 * size3;
	t->stride[0] = size1 * size2 * size3;
	if(nElement != size0 * size1 * size2 * size3)
	{
		if(t->storage)
			t->storage->data = realloc(t->storage->data, size0 * size1 * size2 * size3);
		else t->storage = THLowpStorage_new(size0 * size1 * size2 * size3);
	}
}

/* Resize a lowp tensor to a contiguous 3D shape, reallocating byte storage if needed. */
void THLowpTensor_resize3d(THFloatTensor *t, long size0, long size1, long size2)
{
	long nElement = THFloatTensor_nElement(t);
	t->nDimension = 3;
	t->size[0] = size0;
	t->size[1] = size1;
	t->size[2] = size2;
	t->stride[2] = 1;
	t->stride[1] = size2;
	t->stride[0] = size1 * size2;
	if(nElement != size0 * size1 * size2)
	{
		if(t->storage)
			t->storage->data = realloc(t->storage->data, size0 * size1 * size2);
		else t->storage = THLowpStorage_new(size0 * size1 * size2);
	}
}

/* 8-bit integer GEMM with per-operand offsets and fixed-point output rescale
 * (defined elsewhere). */
void lowpgemm(const int is_a_transposed, const int is_b_transposed, const int is_c_transposed,
	const int m, const int n, const int k,
	const unsigned char *a, const unsigned char *b, unsigned char *c,
	const int lda, const int ldb, const int ldc,
	const int a_offset, const int b_offset, const int c_offset,
	const int c_mult, const int c_shift);

/*
 * r_ = m1 * m2 on quantized byte matrices.  The float scale
 * m1->mult * m2->mult / r_->mult is approximated by the fixed-point pair
 * (c_mult, c_shift), i.e. x * mult >> shift, and the operand/output affine
 * offsets are folded into integer offsets passed to lowpgemm.
 */
void THLowpTensor_mm(THFloatTensor *r_, THFloatTensor *m1, THFloatTensor *m2)
{
	const int transpose_m1 = 1;
	const int transpose_m2 = 1;
	const int transpose_r_ = 1;
	const int m = m1->size[0];
	const int n = m2->size[1];
	const int k = m1->size[1];
	float scaling = m1->mult * m2->mult / r_->mult;
	int shift = roundf((log(scaling) / log(2))) + 10;	// 10 is to keep mult a 10 bit number to keep 8 bit precision
	int mult = roundf((1<<shift) / scaling);
	int offset = roundf(-r_->sub * m1->mult * m2->mult);
	lowpgemm(transpose_m1, transpose_m2, transpose_r_, m, n, k,
		(unsigned char *)THFloatTensor_data(m1),
		(unsigned char *)THFloatTensor_data(m2),
		(unsigned char *)THFloatTensor_data(r_),
		k, n, n,
		roundf(m1->sub * m1->mult), roundf(m2->sub * m2->mult), offset, mult, shift);
}
wrongPercolation.c
#include<stdio.h>
#include<omp.h>

/*
 * Deliberately mis-synchronized OpenMP example (the file name,
 * "wrongPercolation", suggests it demonstrates a value that does NOT
 * reliably percolate from one thread to the others).
 *
 * What actually happens: shared2 is never written, so `shared2 > 23`
 * (42 > 23) is always true and thread 0 passes through its barrier exactly
 * once before writing 10 and breaking — dynamically matching the single
 * barrier every other thread encounters in the else branch.
 *
 * NOTE(review): this code is defective by design and should not be "fixed":
 *  - thread 0 stores to `shared` only AFTER the barrier, so the other
 *    threads may print either the stale initial value 42 or the new 10;
 *    the barrier does not order the write before the reads.
 *  - placing barriers in divergent branches is fragile: if shared2 ever
 *    changed, thread 0 would encounter extra barriers and the program
 *    would become non-conforming (barrier counts must match across the team).
 */
int main()
{
	int shared = 42;
	int shared2 = 42;
#pragma omp parallel
	{
		if (omp_get_thread_num() == 0) {
			while (1) {
#pragma omp barrier
				/* always true: shared2 stays 42, so one iteration only */
				if (shared2 > 23) {
#pragma omp atomic write
					shared = 10;  /* written AFTER the barrier: readers may miss it */
					break;
				}
#pragma omp atomic write
				shared = 0;
			}
		} else {
#pragma omp barrier
			int t = 0;
			/* atomic read avoids a data race, but the value is still
			   nondeterministic: 42 or 10 depending on timing */
#pragma omp atomic read
			t = shared;
			printf("%d\n", t);
		}
	}
}
GenerateICs.c
#include <math.h>
#include <unistd.h>
#include <stdio.h>
#include <stdbool.h>
#include <ctype.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
//#include <pthread.h>
#include <omp.h>
#include <complex.h>
#include <fftw3.h>
#include <gsl/gsl_interp.h>
#include <gsl/gsl_integration.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <gsl/gsl_roots.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_spline.h>

#include "21cmFAST.h"
#include "exceptions.h"
#include "logger.h"
#include "Constants.h"
#include "Globals.h"
#include "indexing.c"
#include "UsefulFunctions.c"
#include "ps.c"
#include "dft.c"
#include "PerturbField.c"
#include "bubble_helper_progs.c"
#include "elec_interp.c"
#include "heating_helper_progs.c"
#include "recombinations.c"
#include "IonisationBox.c"
#include "SpinTemperatureBox.c"
#include "BrightnessTemperatureBox.c"
#include "FindHaloes.c"
#include "PerturbHaloField.c"

/*
 * Enforce the Hermitian symmetry F(-k) = conj(F(k)) on a k-space box so
 * that its inverse FFT is purely real.  Self-conjugate modes (the eight
 * corner/half-Nyquist points) are forced to be real; all other modes on
 * the z = 0 and z = MIDDLE planes are paired with their mirror image.
 * C_INDEX and MIDDLE come from indexing.c — presumably MIDDLE = DIM/2;
 * cosmo_params is accepted for signature uniformity but not read here.
 */
void adj_complex_conj(fftwf_complex *HIRES_box, struct UserParams *user_params, struct CosmoParams *cosmo_params){

    /***** Adjust the complex conjugate relations for a real array *****/

    int i, j, k;

    // corners: these modes are their own mirror image, so they must be real
    HIRES_box[C_INDEX(0,0,0)] = 0;
    HIRES_box[C_INDEX(0,0,MIDDLE)] = crealf(HIRES_box[C_INDEX(0,0,MIDDLE)]);
    HIRES_box[C_INDEX(0,MIDDLE,0)] = crealf(HIRES_box[C_INDEX(0,MIDDLE,0)]);
    HIRES_box[C_INDEX(0,MIDDLE,MIDDLE)] = crealf(HIRES_box[C_INDEX(0,MIDDLE,MIDDLE)]);
    HIRES_box[C_INDEX(MIDDLE,0,0)] = crealf(HIRES_box[C_INDEX(MIDDLE,0,0)]);
    HIRES_box[C_INDEX(MIDDLE,0,MIDDLE)] = crealf(HIRES_box[C_INDEX(MIDDLE,0,MIDDLE)]);
    HIRES_box[C_INDEX(MIDDLE,MIDDLE,0)] = crealf(HIRES_box[C_INDEX(MIDDLE,MIDDLE,0)]);
    HIRES_box[C_INDEX(MIDDLE,MIDDLE,MIDDLE)] = crealf(HIRES_box[C_INDEX(MIDDLE,MIDDLE,MIDDLE)]);

    // do entire i except corners
#pragma omp parallel shared(HIRES_box) private(i,j,k) num_threads(user_params->N_THREADS)
    {
#pragma omp for
        for (i=1; i<MIDDLE; i++){
            // just j corners
            for (j=0; j<=MIDDLE; j+=MIDDLE){
                for (k=0; k<=MIDDLE; k+=MIDDLE){
                    HIRES_box[C_INDEX(i,j,k)] = conjf(HIRES_box[C_INDEX((user_params->DIM)-i,j,k)]);
                }
            }

            // all of j
            for (j=1; j<MIDDLE; j++){
                for (k=0; k<=MIDDLE; k+=MIDDLE){
                    HIRES_box[C_INDEX(i,j,k)] = conjf(HIRES_box[C_INDEX((user_params->DIM)-i,(user_params->DIM)-j,k)]);
                    HIRES_box[C_INDEX(i,(user_params->DIM)-j,k)] = conjf(HIRES_box[C_INDEX((user_params->DIM)-i,j,k)]);
                }
            }
        } // end loop over i
    }

    // now the i corners
#pragma omp parallel shared(HIRES_box) private(i,j,k) num_threads(user_params->N_THREADS)
    {
#pragma omp for
        for (i=0; i<=MIDDLE; i+=MIDDLE){
            for (j=1; j<MIDDLE; j++){
                for (k=0; k<=MIDDLE; k+=MIDDLE){
                    HIRES_box[C_INDEX(i,j,k)] = conjf(HIRES_box[C_INDEX(i,(user_params->DIM)-j,k)]);
                }
            }
        } // end loop over remaining j
    }
}

// Re-write of init.c for original 21cmFAST
int ComputeInitialConditions(
    unsigned long long random_seed, struct UserParams *user_params,
    struct CosmoParams *cosmo_params, struct InitialConditions *boxes
){
    // Generates the initial conditions: gaussian random density field (user_params->DIM^3) as well as the equal or lower resolution velocity fields, and smoothed density field (user_params->HII_DIM^3).
    //
    // Author: Andrei Mesinger
    // Date: 9/29/06

    int status;
    Try{ // This Try wraps the entire function so we don't indent.
// Makes the parameter structs visible to a variety of functions/macros // Do each time to avoid Python garbage collection issues Broadcast_struct_global_PS(user_params,cosmo_params); Broadcast_struct_global_UF(user_params,cosmo_params); unsigned long long ct; int n_x, n_y, n_z, i, j, k, ii, thread_num, dimension; float k_x, k_y, k_z, k_mag, p, a, b, k_sq; double pixel_deltax; float p_vcb, vcb_i; float f_pixel_factor; gsl_rng * r[user_params->N_THREADS]; gsl_rng * rseed = gsl_rng_alloc(gsl_rng_mt19937); // An RNG for generating seeds for multithreading gsl_rng_set(rseed, random_seed); omp_set_num_threads(user_params->N_THREADS); switch(user_params->PERTURB_ON_HIGH_RES) { case 0: dimension = user_params->HII_DIM; break; case 1: dimension = user_params->DIM; break; } // ************ INITIALIZATION ********************** // unsigned int seeds[user_params->N_THREADS]; // For multithreading, seeds for the RNGs are generated from an initial RNG (based on the input random_seed) and then shuffled (Author: Fred Davies) int num_int = INT_MAX/16; unsigned int *many_ints = (unsigned int *)malloc((size_t)(num_int*sizeof(unsigned int))); // Some large number of possible integers for (i=0; i<num_int; i++) { many_ints[i] = i; } gsl_ran_choose(rseed, seeds, user_params->N_THREADS, many_ints, num_int, sizeof(unsigned int)); // Populate the seeds array from the large list of integers gsl_ran_shuffle(rseed, seeds, user_params->N_THREADS, sizeof(unsigned int)); // Shuffle the randomly selected integers int checker; checker = 0; // seed the random number generators for (thread_num = 0; thread_num < user_params->N_THREADS; thread_num++){ switch (checker){ case 0: r[thread_num] = gsl_rng_alloc(gsl_rng_mt19937); gsl_rng_set(r[thread_num], seeds[thread_num]); break; case 1: r[thread_num] = gsl_rng_alloc(gsl_rng_gfsr4); gsl_rng_set(r[thread_num], seeds[thread_num]); break; case 2: r[thread_num] = gsl_rng_alloc(gsl_rng_cmrg); gsl_rng_set(r[thread_num], seeds[thread_num]); break; case 3: 
r[thread_num] = gsl_rng_alloc(gsl_rng_mrg); gsl_rng_set(r[thread_num], seeds[thread_num]); break; case 4: r[thread_num] = gsl_rng_alloc(gsl_rng_taus2); gsl_rng_set(r[thread_num], seeds[thread_num]); break; } // end switch checker += 1; if(checker==5) { checker = 0; } } free(many_ints); // allocate array for the k-space and real-space boxes fftwf_complex *HIRES_box = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*KSPACE_NUM_PIXELS); fftwf_complex *HIRES_box_saved = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*KSPACE_NUM_PIXELS); // allocate array for the k-space and real-space boxes for vcb fftwf_complex *HIRES_box_vcb_saved; // HIRES_box_vcb_saved may be needed if FFTW_Wisdom doesn't exist -- currently unused // but I am not going to allocate it until I am certain I needed it. // find factor of HII pixel size / deltax pixel size f_pixel_factor = user_params->DIM/(float)user_params->HII_DIM; // ************ END INITIALIZATION ****************** // LOG_DEBUG("Finished initialization."); // ************ CREATE K-SPACE GAUSSIAN RANDOM FIELD *********** // init_ps(); #pragma omp parallel shared(HIRES_box,r) \ private(n_x,n_y,n_z,k_x,k_y,k_z,k_mag,p,a,b,p_vcb) num_threads(user_params->N_THREADS) { #pragma omp for for (n_x=0; n_x<user_params->DIM; n_x++){ // convert index to numerical value for this component of the k-mode: k = (2*pi/L) * n if (n_x>MIDDLE) k_x =(n_x-user_params->DIM) * DELTA_K; // wrap around for FFT convention else k_x = n_x * DELTA_K; for (n_y=0; n_y<user_params->DIM; n_y++){ // convert index to numerical value for this component of the k-mode: k = (2*pi/L) * n if (n_y>MIDDLE) k_y =(n_y-user_params->DIM) * DELTA_K; else k_y = n_y * DELTA_K; // since physical space field is real, only half contains independent modes for (n_z=0; n_z<=MIDDLE; n_z++){ // convert index to numerical value for this component of the k-mode: k = (2*pi/L) * n k_z = n_z * DELTA_K; // now get the power spectrum; remember, only the magnitude of k counts (due to 
issotropy) // this could be used to speed-up later maybe k_mag = sqrt(k_x*k_x + k_y*k_y + k_z*k_z); p = power_in_k(k_mag); // ok, now we can draw the values of the real and imaginary part // of our k entry from a Gaussian distribution if(user_params->NO_RNG) { a = 1.0; b = -1.0; } else { a = gsl_ran_ugaussian(r[omp_get_thread_num()]); b = gsl_ran_ugaussian(r[omp_get_thread_num()]); } HIRES_box[C_INDEX(n_x, n_y, n_z)] = sqrt(VOLUME*p/2.0) * (a + b*I); } } } } LOG_DEBUG("Drawn random fields."); // ***** Adjust the complex conjugate relations for a real array ***** // adj_complex_conj(HIRES_box,user_params,cosmo_params); memcpy(HIRES_box_saved, HIRES_box, sizeof(fftwf_complex)*KSPACE_NUM_PIXELS); // FFT back to real space int stat = dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, HIRES_box); if(stat>0) Throw(stat); LOG_DEBUG("FFT'd hires boxes."); #pragma omp parallel shared(boxes,HIRES_box) private(i,j,k) num_threads(user_params->N_THREADS) { #pragma omp for for (i=0; i<user_params->DIM; i++){ for (j=0; j<user_params->DIM; j++){ for (k=0; k<user_params->DIM; k++){ *((float *)boxes->hires_density + R_INDEX(i,j,k)) = *((float *)HIRES_box + R_FFT_INDEX(i,j,k))/VOLUME; } } } } // *** If required, let's also create a lower-resolution version of the density field *** // memcpy(HIRES_box, HIRES_box_saved, sizeof(fftwf_complex)*KSPACE_NUM_PIXELS); // Only filter if we are perturbing on the low-resolution grid if(!user_params->PERTURB_ON_HIGH_RES) { if (user_params->DIM != user_params->HII_DIM) { filter_box(HIRES_box, 0, 0, L_FACTOR*user_params->BOX_LEN/(user_params->HII_DIM+0.0)); } // FFT back to real space dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, HIRES_box); // Renormalise the FFT'd box (sample the high-res box if we are perturbing on the low-res grid) #pragma omp parallel shared(boxes,HIRES_box,f_pixel_factor) private(i,j,k) num_threads(user_params->N_THREADS) { #pragma omp for for (i=0; 
i<user_params->HII_DIM; i++){ for (j=0; j<user_params->HII_DIM; j++){ for (k=0; k<user_params->HII_DIM; k++){ boxes->lowres_density[HII_R_INDEX(i,j,k)] = *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5), (unsigned long long)(j*f_pixel_factor+0.5), (unsigned long long)(k*f_pixel_factor+0.5)))/VOLUME; } } } } } // ******* Relative Velocity part ******* // if(user_params->USE_RELATIVE_VELOCITIES){ //JBM: We use the memory allocated to HIRES_box as it's free. for(ii=0;ii<3;ii++) { memcpy(HIRES_box, HIRES_box_saved, sizeof(fftwf_complex)*KSPACE_NUM_PIXELS); #pragma omp parallel shared(HIRES_box,ii) private(n_x,n_y,n_z,k_x,k_y,k_z,k_mag,p,p_vcb) num_threads(user_params->N_THREADS) { #pragma omp for for (n_x=0; n_x<user_params->DIM; n_x++){ if (n_x>MIDDLE) k_x =(n_x-user_params->DIM) * DELTA_K; // wrap around for FFT convention else k_x = n_x * DELTA_K; for (n_y=0; n_y<user_params->DIM; n_y++){ if (n_y>MIDDLE) k_y =(n_y-user_params->DIM) * DELTA_K; else k_y = n_y * DELTA_K; for (n_z=0; n_z<=MIDDLE; n_z++){ k_z = n_z * DELTA_K; k_mag = sqrt(k_x*k_x + k_y*k_y + k_z*k_z); p = power_in_k(k_mag); p_vcb = power_in_vcb(k_mag); // now set the velocities if ((n_x==0) && (n_y==0) && (n_z==0)){ // DC mode HIRES_box[0] = 0; } else{ if(ii==0) { HIRES_box[C_INDEX(n_x,n_y,n_z)] *= I * k_x/k_mag * sqrt(p_vcb/p) * C_KMS; } if(ii==1) { HIRES_box[C_INDEX(n_x,n_y,n_z)] *= I * k_y/k_mag * sqrt(p_vcb/p) * C_KMS; } if(ii==2) { HIRES_box[C_INDEX(n_x,n_y,n_z)] *= I * k_z/k_mag * sqrt(p_vcb/p) * C_KMS; } } } } } } //we only care about the lowres vcb box, so we filter it directly. 
if (user_params->DIM != user_params->HII_DIM) { filter_box(HIRES_box, 0, 0, L_FACTOR*user_params->BOX_LEN/(user_params->HII_DIM+0.0)); } //fft each velocity component back to real space dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, HIRES_box); #pragma omp parallel shared(boxes,HIRES_box,f_pixel_factor,ii) private(i,j,k,vcb_i) num_threads(user_params->N_THREADS) { #pragma omp for for (i=0; i<user_params->HII_DIM; i++){ for (j=0; j<user_params->HII_DIM; j++){ for (k=0; k<user_params->HII_DIM; k++){ vcb_i = *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5), (unsigned long long)(j*f_pixel_factor+0.5), (unsigned long long)(k*f_pixel_factor+0.5))); boxes->lowres_vcb[HII_R_INDEX(i,j,k)] += vcb_i*vcb_i; } } } } } //now we take the sqrt of that and normalize the FFT for (i=0; i<user_params->HII_DIM; i++){ for (j=0; j<user_params->HII_DIM; j++){ for (k=0; k<user_params->HII_DIM; k++){ boxes->lowres_vcb[HII_R_INDEX(i,j,k)] = sqrt(boxes->lowres_vcb[HII_R_INDEX(i,j,k)])/VOLUME; } } } } LOG_DEBUG("Completed Relative velocities."); // ******* End of Relative Velocity part ******* // // Now look at the velocities for(ii=0;ii<3;ii++) { memcpy(HIRES_box, HIRES_box_saved, sizeof(fftwf_complex)*KSPACE_NUM_PIXELS); // Now let's set the velocity field/dD/dt (in comoving Mpc) #pragma omp parallel shared(HIRES_box,ii) private(n_x,n_y,n_z,k_x,k_y,k_z,k_sq) num_threads(user_params->N_THREADS) { #pragma omp for for (n_x=0; n_x<user_params->DIM; n_x++){ if (n_x>MIDDLE) k_x =(n_x-user_params->DIM) * DELTA_K; // wrap around for FFT convention else k_x = n_x * DELTA_K; for (n_y=0; n_y<user_params->DIM; n_y++){ if (n_y>MIDDLE) k_y =(n_y-user_params->DIM) * DELTA_K; else k_y = n_y * DELTA_K; for (n_z=0; n_z<=MIDDLE; n_z++){ k_z = n_z * DELTA_K; k_sq = k_x*k_x + k_y*k_y + k_z*k_z; // now set the velocities if ((n_x==0) && (n_y==0) && (n_z==0)){ // DC mode HIRES_box[0] = 0; } else{ if(ii==0) { HIRES_box[C_INDEX(n_x,n_y,n_z)] *= 
k_x*I/k_sq/VOLUME; } if(ii==1) { HIRES_box[C_INDEX(n_x,n_y,n_z)] *= k_y*I/k_sq/VOLUME; } if(ii==2) { HIRES_box[C_INDEX(n_x,n_y,n_z)] *= k_z*I/k_sq/VOLUME; } } } } } } // Filter only if we require perturbing on the low-res grid if(!user_params->PERTURB_ON_HIGH_RES) { if (user_params->DIM != user_params->HII_DIM) { filter_box(HIRES_box, 0, 0, L_FACTOR*user_params->BOX_LEN/(user_params->HII_DIM+0.0)); } } dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, HIRES_box); // now sample to lower res // now sample the filtered box #pragma omp parallel shared(boxes,HIRES_box,f_pixel_factor,ii,dimension) private(i,j,k) num_threads(user_params->N_THREADS) { #pragma omp for for (i=0; i<dimension; i++){ for (j=0; j<dimension; j++){ for (k=0; k<dimension; k++){ if(user_params->PERTURB_ON_HIGH_RES) { if(ii==0) { boxes->hires_vx[R_INDEX(i,j,k)] = *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i), (unsigned long long)(j), (unsigned long long)(k))); } if(ii==1) { boxes->hires_vy[R_INDEX(i,j,k)] = *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i), (unsigned long long)(j), (unsigned long long)(k))); } if(ii==2) { boxes->hires_vz[R_INDEX(i,j,k)] = *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i), (unsigned long long)(j), (unsigned long long)(k))); } } else { if(ii==0) { boxes->lowres_vx[HII_R_INDEX(i,j,k)] = *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5), (unsigned long long)(j*f_pixel_factor+0.5), (unsigned long long)(k*f_pixel_factor+0.5))); } if(ii==1) { boxes->lowres_vy[HII_R_INDEX(i,j,k)] = *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5), (unsigned long long)(j*f_pixel_factor+0.5), (unsigned long long)(k*f_pixel_factor+0.5))); } if(ii==2) { boxes->lowres_vz[HII_R_INDEX(i,j,k)] = *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5), (unsigned long long)(j*f_pixel_factor+0.5), (unsigned long long)(k*f_pixel_factor+0.5))); } } } } } } } 
LOG_DEBUG("Done Inverse FT."); // * *************************************************** * // // * BEGIN 2LPT PART * // // * *************************************************** * // // Generation of the second order Lagrangian perturbation theory (2LPT) corrections to the ZA // reference: Scoccimarro R., 1998, MNRAS, 299, 1097-1118 Appendix D // Parameter set in ANAL_PARAMS.H if(user_params->USE_2LPT){ // use six supplementary boxes to store the gradients of phi_1 (eq. D13b) // Allocating the boxes #define PHI_INDEX(i, j) ((int) ((i) - (j)) + 3*((j)) - ((int)(j))/2 ) // ij -> INDEX // 00 -> 0 // 11 -> 3 // 22 -> 5 // 10 -> 1 // 20 -> 2 // 21 -> 4 fftwf_complex *phi_1 = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*KSPACE_NUM_PIXELS); // First generate the ii,jj phi_1 boxes int phi_component; float component_ii,component_jj,component_ij; // Indexing for the various phy components int phi_directions[3][2] = {{0,1},{0,2},{1,2}}; #pragma omp parallel shared(HIRES_box,phi_1) private(i,j,k) num_threads(user_params->N_THREADS) { #pragma omp for for (i=0; i<user_params->DIM; i++){ for (j=0; j<user_params->DIM; j++){ for (k=0; k<user_params->DIM; k++){ *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i), (unsigned long long)(j), (unsigned long long)(k)) ) = 0.; } } } } // First iterate over the i = j components to phi // We'll also save these temporarily to the hires_vi_2LPT boxes which will get // overwritten later with the correct 2LPT velocities for(phi_component=0;phi_component<3;phi_component++) { i = j = phi_component; // generate the phi_1 boxes in Fourier transform #pragma omp parallel shared(HIRES_box,phi_1,i,j) private(n_x,n_y,n_z,k_x,k_y,k_z,k_sq,k) num_threads(user_params->N_THREADS) { #pragma omp for for (n_x=0; n_x<user_params->DIM; n_x++){ if (n_x>MIDDLE) k_x =(n_x-user_params->DIM) * DELTA_K; // wrap around for FFT convention else k_x = n_x * DELTA_K; for (n_y=0; n_y<user_params->DIM; n_y++){ if (n_y>MIDDLE) k_y =(n_y-user_params->DIM) * 
DELTA_K; else k_y = n_y * DELTA_K; for (n_z=0; n_z<=MIDDLE; n_z++){ k_z = n_z * DELTA_K; k_sq = k_x*k_x + k_y*k_y + k_z*k_z; float k[] = {k_x, k_y, k_z}; // now set the velocities if ((n_x==0) && (n_y==0) && (n_z==0)){ // DC mode phi_1[0] = 0; } else{ phi_1[C_INDEX(n_x,n_y,n_z)] = -k[i]*k[j]*HIRES_box_saved[C_INDEX(n_x, n_y, n_z)]/k_sq/VOLUME; // note the last factor of 1/VOLUME accounts for the scaling in real-space, following the FFT } } } } } dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, phi_1); // Temporarily store in the allocated hires_vi_2LPT boxes #pragma omp parallel shared(boxes,phi_1,phi_component) private(i,j,k) num_threads(user_params->N_THREADS) { #pragma omp for for (i=0; i<user_params->DIM; i++){ for (j=0; j<user_params->DIM; j++){ for (k=0; k<user_params->DIM; k++){ if(phi_component==0) { boxes->hires_vx_2LPT[R_INDEX(i,j,k)] = *((float *)phi_1 + R_FFT_INDEX((unsigned long long)(i), (unsigned long long)(j), (unsigned long long)(k))); } if(phi_component==1) { boxes->hires_vy_2LPT[R_INDEX(i,j,k)] = *((float *)phi_1 + R_FFT_INDEX((unsigned long long)(i), (unsigned long long)(j), (unsigned long long)(k))); } if(phi_component==2) { boxes->hires_vz_2LPT[R_INDEX(i,j,k)] = *((float *)phi_1 + R_FFT_INDEX((unsigned long long)(i), (unsigned long long)(j), (unsigned long long)(k))); } } } } } } for(phi_component=0;phi_component<3;phi_component++) { // Now calculate the cross components and start evaluating the 2LPT field i = phi_directions[phi_component][0]; j = phi_directions[phi_component][1]; // generate the phi_1 boxes in Fourier transform #pragma omp parallel shared(HIRES_box,phi_1) private(n_x,n_y,n_z,k_x,k_y,k_z,k_sq,k) num_threads(user_params->N_THREADS) { #pragma omp for for (n_x=0; n_x<user_params->DIM; n_x++){ if (n_x>MIDDLE) k_x =(n_x-user_params->DIM) * DELTA_K; // wrap around for FFT convention else k_x = n_x * DELTA_K; for (n_y=0; n_y<user_params->DIM; n_y++){ if (n_y>MIDDLE) k_y =(n_y-user_params->DIM) * 
DELTA_K; else k_y = n_y * DELTA_K; for (n_z=0; n_z<=MIDDLE; n_z++){ k_z = n_z * DELTA_K; k_sq = k_x*k_x + k_y*k_y + k_z*k_z; float k[] = {k_x, k_y, k_z}; // now set the velocities if ((n_x==0) && (n_y==0) && (n_z==0)){ // DC mode phi_1[0] = 0; } else{ phi_1[C_INDEX(n_x,n_y,n_z)] = -k[i]*k[j]*HIRES_box_saved[C_INDEX(n_x, n_y, n_z)]/k_sq/VOLUME; // note the last factor of 1/VOLUME accounts for the scaling in real-space, following the FFT } } } } } dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, phi_1); // Then we will have the laplacian of phi_2 (eq. D13b) // After that we have to return in Fourier space and generate the Fourier transform of phi_2 #pragma omp parallel shared(HIRES_box,phi_1,phi_component) private(i,j,k,component_ii,component_jj,component_ij) num_threads(user_params->N_THREADS) { #pragma omp for for (i=0; i<user_params->DIM; i++){ for (j=0; j<user_params->DIM; j++){ for (k=0; k<user_params->DIM; k++){ // Note, I have temporarily stored the components into other arrays to minimise memory usage // phi - {0, 1, 2} -> {hires_vx_2LPT, hires_vy_2LPT, hires_vz_2LPT} // This may be opaque to the user, but this shouldn't need modification if(phi_component==0) { component_ii = boxes->hires_vx_2LPT[R_INDEX(i,j,k)]; component_jj = boxes->hires_vy_2LPT[R_INDEX(i,j,k)]; component_ij = *((float *)phi_1 + R_FFT_INDEX((unsigned long long)(i), (unsigned long long)(j), (unsigned long long)(k))); } if(phi_component==1) { component_ii = boxes->hires_vx_2LPT[R_INDEX(i,j,k)]; component_jj = boxes->hires_vz_2LPT[R_INDEX(i,j,k)]; component_ij = *((float *)phi_1 + R_FFT_INDEX((unsigned long long)(i), (unsigned long long)(j), (unsigned long long)(k))); } if(phi_component==2) { component_ii = boxes->hires_vy_2LPT[R_INDEX(i,j,k)]; component_jj = boxes->hires_vz_2LPT[R_INDEX(i,j,k)]; component_ij = *((float *)phi_1 + R_FFT_INDEX((unsigned long long)(i), (unsigned long long)(j), (unsigned long long)(k))); } // Kept in this form to maintain 
similar (possible) rounding errors *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i), (unsigned long long)(j), (unsigned long long)(k)) ) += \ ( component_ii * component_jj ); *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i), (unsigned long long)(j), (unsigned long long)(k)) ) -= \ ( component_ij * component_ij ); } } } } } #pragma omp parallel shared(HIRES_box,phi_1) private(i,j,k) num_threads(user_params->N_THREADS) { #pragma omp for for (i=0; i<user_params->DIM; i++){ for (j=0; j<user_params->DIM; j++){ for (k=0; k<user_params->DIM; k++){ *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i),(unsigned long long)(j),(unsigned long long)(k)) ) /= TOT_NUM_PIXELS; } } } } // Perform FFTs dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, HIRES_box); memcpy(HIRES_box_saved, HIRES_box, sizeof(fftwf_complex)*KSPACE_NUM_PIXELS); // Now we can store the content of box in a back-up array // Then we can generate the gradients of phi_2 (eq. D13b and D9) // ***** Store back-up k-box RHS eq. 
D13b ***** // // For each component, we generate the velocity field (same as the ZA part) // Now let's set the velocity field/dD/dt (in comoving Mpc) // read in the box // TODO correct free of phi_1 for(ii=0;ii<3;ii++) { if(ii>0) { memcpy(HIRES_box, HIRES_box_saved, sizeof(fftwf_complex)*KSPACE_NUM_PIXELS); } #pragma omp parallel shared(HIRES_box,ii) private(n_x,n_y,n_z,k_x,k_y,k_z,k_sq) num_threads(user_params->N_THREADS) { #pragma omp for // set velocities/dD/dt for (n_x=0; n_x<user_params->DIM; n_x++){ if (n_x>MIDDLE) k_x =(n_x-user_params->DIM) * DELTA_K; // wrap around for FFT convention else k_x = n_x * DELTA_K; for (n_y=0; n_y<user_params->DIM; n_y++){ if (n_y>MIDDLE) k_y =(n_y-user_params->DIM) * DELTA_K; else k_y = n_y * DELTA_K; for (n_z=0; n_z<=MIDDLE; n_z++){ k_z = n_z * DELTA_K; k_sq = k_x*k_x + k_y*k_y + k_z*k_z; // now set the velocities if ((n_x==0) && (n_y==0) && (n_z==0)){ // DC mode HIRES_box[0] = 0; } else{ if(ii==0) { HIRES_box[C_INDEX(n_x,n_y,n_z)] *= k_x*I/k_sq; } if(ii==1) { HIRES_box[C_INDEX(n_x,n_y,n_z)] *= k_y*I/k_sq; } if(ii==2) { HIRES_box[C_INDEX(n_x,n_y,n_z)] *= k_z*I/k_sq; } } } // note the last factor of 1/VOLUME accounts for the scaling in real-space, following the FFT } } } // Filter only if we require perturbing on the low-res grid if(!user_params->PERTURB_ON_HIGH_RES) { if (user_params->DIM != user_params->HII_DIM) { filter_box(HIRES_box, 0, 0, L_FACTOR*user_params->BOX_LEN/(user_params->HII_DIM+0.0)); } } dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, HIRES_box); // now sample to lower res // now sample the filtered box #pragma omp parallel shared(boxes,HIRES_box,f_pixel_factor,ii,dimension) private(i,j,k) num_threads(user_params->N_THREADS) { #pragma omp for for (i=0; i<dimension; i++){ for (j=0; j<dimension; j++){ for (k=0; k<dimension; k++){ if(user_params->PERTURB_ON_HIGH_RES) { if(ii==0) { boxes->hires_vx_2LPT[R_INDEX(i,j,k)] = *((float *)HIRES_box + R_FFT_INDEX((unsigned long 
long)(i), (unsigned long long)(j), (unsigned long long)(k))); } if(ii==1) { boxes->hires_vy_2LPT[R_INDEX(i,j,k)] = *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i), (unsigned long long)(j), (unsigned long long)(k))); } if(ii==2) { boxes->hires_vz_2LPT[R_INDEX(i,j,k)] = *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i), (unsigned long long)(j), (unsigned long long)(k))); } } else { if(ii==0) { boxes->lowres_vx_2LPT[HII_R_INDEX(i,j,k)] = *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5), (unsigned long long)(j*f_pixel_factor+0.5), (unsigned long long)(k*f_pixel_factor+0.5))); } if(ii==1) { boxes->lowres_vy_2LPT[HII_R_INDEX(i,j,k)] = *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5), (unsigned long long)(j*f_pixel_factor+0.5), (unsigned long long)(k*f_pixel_factor+0.5))); } if(ii==2) { boxes->lowres_vz_2LPT[HII_R_INDEX(i,j,k)] = *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5), (unsigned long long)(j*f_pixel_factor+0.5), (unsigned long long)(k*f_pixel_factor+0.5))); } } } } } } } // deallocate the supplementary boxes fftwf_free(phi_1); } LOG_DEBUG("Done 2LPT."); // * *********************************************** * // // * END 2LPT PART * // // * *********************************************** * // fftwf_cleanup_threads(); fftwf_cleanup(); fftwf_forget_wisdom(); // deallocate fftwf_free(HIRES_box); fftwf_free(HIRES_box_saved); free_ps(); for (i=0; i<user_params->N_THREADS; i++) { gsl_rng_free (r[i]); } gsl_rng_free(rseed); LOG_DEBUG("Cleaned Up."); } // End of Try{} Catch(status){ return(status); } return(0); }
test12.c
// NOTE(review): this file appears to be a test fixture for a compiler /
// static-analysis / instrumentation tool (presumably exercising control-flow
// constructs: loops, break/continue, labels, OpenMP pragmas).  The
// "defects" below -- infinite loops, uninitialized reads, expression
// statements with no effect -- look intentional; verify against the tool's
// test suite before changing anything.

int x = 0;  // file-scope counter mutated by main()

// trivial function: both branches return immediately
void foo() {
    if (1) {
        return;
    } else {
        return;
    }
}

int main(int argc, char * argv[]) {
    // NOTE(review): `while (2)` is always true, so this empty do-while never
    // terminates and nothing after it executes at run time -- presumably the
    // fixture is only compiled/analyzed, not run.
    do {
    } while (2);
    x++;
    int i;
    for (i = 0; i < 10; i++) {
    }
//    foo();
//    return;
//    int i;
//    i = 10;
//#pragma omp master
//    {
//        int i;
//        i++;
//    }
//    foo();
//    l1: l2: 55+i;
//    50+3;
//#pragma omp for
//    for (i = 0; i < 10; i++) {
//        int x;
////        if (1) {
//            x + 13;
//            continue;
////        }
//    }
//
//    l1: 33;
//    if (1) {
//        int x;
//        x + 11;
//    }
//    if (2) {
//        int x;
//        x + 12;
//    } else {
//        int x;
//        x + 13;
//    }
//    if (1) {
//        int x;
//        x+1;
//        return 1;
//    }
//    l1: l2: 31;
//    32;
//    33;
//    int x;
//    if (11) {
//    }
//#pragma omp task if (1) final(1)
//    {
//
//    }
//    if (51) {
//
//    } else {
//
//    }
    // NOTE(review): `k` is read uninitialized below (indeterminate value) --
    // again, presumably an intentional UB test case.
    int k;
    while (k) {
    }
    // infinite loop: `continue` re-tests the always-true condition
    while (1) {
        int x;
        x + 3;
        continue;
    }
    // infinite loop: the `else break;` branch is unreachable
    while (1) {
        if (1) {
            continue;
        } else {
            break;
        }
    }
    // body never entered
    while (0) {
        13;
    }
    // executes the body once, then breaks
    while (1) {
        break;
    }
//    switch(i) {
//    case 1:
//        11;
//        break;
//    }
//    switch (i) {
//    case 1:
//        11;
//        int x;
//        x + 3;
//        break;
//    case 2:
//        break;
//    }
//    switch (i) {
//    case 1:
//        11;
//        int x;
//        x + 3;
//    case 2:
//        break;
//    }
//    switch (i) {
//    case 1:
//        11;
//        int x;
//        x + 3;
//        break;
//    default:
//        break;
//    }
//    switch (i) {
//    case 1:
//        11;
//        int x;
//        x + 3;
//        break;
//    default:
//        ;
//    }
//    if (1) {
//        goto l1;
//    } else if (2){
//        goto l2;
//    }
//    int j = 10;
//    do {
//        int x;
//        x + 33;
//        continue;
//    } while (j++ < 10);
//#pragma omp parallel
//#pragma omp for
//    for (j = 0; j< 10; j++) {
//        continue;
//    }
//    do {
//        int x;
//        x+3;
//        if (1) {
//            break;
//        } else if (2) {
//            continue;
//        } else if (3) {
//            int x;
//            x+13;
//            continue;
//        }
//    } while (x);
//    while (x) {
//        if (1) {
//            break;
//        } else if (2) {
//            continue;
//        } else if (3) {
//            continue;
//        }
//    }
//
//    for (i = 0; i < 10; i++) {
//        int x;
//        x + 2;
//        continue;
//    }
//    for (i = 0; i < 10;) {
//        if (1) {
//            if (11) {
//                int x;
//                x + 3;
//            }
//            continue;
//        }
//        if (12) {
////            if (11) {
//                int i;
//                i + 3;
////            }
//            continue;
//        }
////        if (1) {
////            int x;
////            x + 333;
////            continue;
////        } else if (2) {
////            int x;
////            x + 3;
////            break;
////        }
//    }
//    for (i = 0; i < 10; i++) {
//        int i;
//        i++;
//        if (1) {
//            break;
//        } else if (2) {
//            int x;
//            x + 3;
//            continue;
//        } else if (3) {
//            continue;
//        }
//        111;
//    }
//
//    for (; i < 10; i++) {
//        if (1) {
//            break;
//        } else if (2) {
//            continue;
//        } else if (3) {
//            continue;
//        }
//        111;
//    }
//
//    for (; i < 10;) {
//        int x;
//        x + 3;
//        if (1) {
//            break;
//        } else if (2) {
//            continue;
//        } else if (3) {
//            continue;
//        }
//        111;
//    }
//
//    for (i = 0; i < 10; i++) {
//        if (1) {
//            break;
//        } else if (2) {
//            continue;
//        } else if (3) {
//            continue;
//        }
//        111;
//    }
//
//    do {
//        8;
//    } while (7);
//#pragma omp parallel num_threads(1)
//    {
//        int x = 0;
//        31+x;
//    }
//
//#pragma omp parallel
//    {
//#pragma omp single
//        {
//            int x;
//            x+31;
//        }
//
//#pragma omp task if (1)
//        {
//            int x;
//            x+41;
//        }
//
//#pragma omp master
//        {
//            int x;
//            x+61;
//        }
//
//#pragma omp critical
//        {
//            int x;
//            x+51;
//        }
//
//#pragma omp atomic write
//        x = 61;
//
//#pragma omp ordered
//        {
//            int x;
//            x+71;
//        }
//    }
//
//#pragma omp parallel
//    {
//#pragma omp sections
//        {
//#pragma omp section
//            {
//                int x;
//                x+31;
//            }
//#pragma omp section
//            {
//                int x;
//                x+41;
//            }
//        }
//    }
//////////////////////////
//#pragma omp parallel
//    {
//#pragma omp for
//        for (i = 0; i < 8; i++) {
//            111;
//            if (1) {
//                continue;
//            }
////            if (1) {
////                break;
////            } else if (2) {
////                continue;
////            } else if (3) {
////                continue;
////            }
//        }
//    }
////////////////////////
//
//#pragma omp parallel
//    {
//        3;
//    }
}
deconvolution_3x3.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON

// 3x3 stride-1 deconvolution (transposed convolution), optionally NEON-accelerated.
// Scatter-accumulate formulation: each input pixel `val` adds val*k[r][c] into
// the 3x3 neighborhood of the output anchored at the same position (see the
// scalar tail loop).  The output is pre-filled with the per-channel bias.
// Channels are parallelized over OpenMP; top_blob must be pre-sized by the caller.
static void deconv3x3s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        for (int q=0; q<inch; q++)
        {
            const float* img0 = bottom_blob.channel(q);

            // 9 weights per (output, input) channel pair
            const float* kernel0 = kernel + p*inch*9 + q*9;

            const float* r0 = img0;

            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

#if __ARM_NEON
            // NOTE: vld1q_f32 loads 4 floats; the 4th lane of each row is the
            // first weight of the next row (or past the end for k2) and is unused.
            float32x4_t _k0 = vld1q_f32(k0);
            float32x4_t _k1 = vld1q_f32(k1);
            float32x4_t _k2 = vld1q_f32(k2);
#endif // __ARM_NEON

            for (int i = 0; i < h; i++)
            {
                float* outptr = out.data + out.w * i;

                // three consecutive output rows receive contributions from input row i
                float* outptr0 = outptr;
                float* outptr1 = outptr + outw;
                float* outptr2 = outptr + outw*2;

                int j = 0;
#if __ARM_NEON
                for (; j+3 < w; j+=4)
                {
                    float32x4_t _v = vld1q_f32(r0);
#if 0
                    // bad compiler generate slow instructions :(
                    // (disabled alternative that avoids overlapping loads/stores
                    //  via vext shuffles -- kept for reference)
                    // 0
                    float32x4_t _out00 = vld1q_f32(outptr0 + 0);
                    _out00 = vmlaq_lane_f32(_out00, _v, vget_low_f32(_k0), 0);

                    float32x4_t _out01 = vmulq_lane_f32(_v, vget_low_f32(_k0), 1);

                    // ext
                    float32x4_t _zero_out01 = vdupq_n_f32(0.f);
                    _zero_out01 = vextq_f32(_zero_out01, _out01, 3);

                    _out00 = vaddq_f32(_out00, _zero_out01);

                    //
                    float32x2_t _out00low = vget_low_f32(_out00);
                    float32x2_t _out00high = vget_high_f32(_out00);
                    _out00high = vmla_lane_f32(_out00high, vget_low_f32(_v), vget_high_f32(_k0), 0);

                    _out00 = vcombine_f32(_out00low, _out00high);

                    vst1q_f32(outptr0 + 0, _out00);

                    //
                    float32x2_t _out02high = vld1_f32(outptr0 + 4);
                    float32x2_t _out01_zero = vext_f32(vget_high_f32(_out01), vget_low_f32(_zero_out01), 1);
                    _out02high = vadd_f32(_out02high, _out01_zero);
                    _out02high = vmla_lane_f32(_out02high, vget_high_f32(_v), vget_high_f32(_k0), 0);

                    vst1_f32(outptr0 + 4, _out02high);

                    // 1
                    float32x4_t _out10 = vld1q_f32(outptr1 + 0);
                    _out10 = vmlaq_lane_f32(_out10, _v, vget_low_f32(_k1), 0);

                    float32x4_t _out11 = vmulq_lane_f32(_v, vget_low_f32(_k1), 1);

                    // ext
                    float32x4_t _zero_out11 = vdupq_n_f32(0.f);
                    _zero_out11 = vextq_f32(_zero_out11, _out11, 3);

                    _out10 = vaddq_f32(_out10, _zero_out11);

                    //
                    float32x2_t _out10low = vget_low_f32(_out10);
                    float32x2_t _out10high = vget_high_f32(_out10);
                    _out10high = vmla_lane_f32(_out10high, vget_low_f32(_v), vget_high_f32(_k1), 0);

                    _out10 = vcombine_f32(_out10low, _out10high);

                    vst1q_f32(outptr1 + 0, _out10);

                    //
                    float32x2_t _out12high = vld1_f32(outptr1 + 4);
                    float32x2_t _out11_zero = vext_f32(vget_high_f32(_out11), vget_low_f32(_zero_out11), 1);
                    _out12high = vadd_f32(_out12high, _out11_zero);
                    _out12high = vmla_lane_f32(_out12high, vget_high_f32(_v), vget_high_f32(_k1), 0);

                    vst1_f32(outptr1 + 4, _out12high);

                    // 2
                    float32x4_t _out20 = vld1q_f32(outptr2 + 0);
                    _out20 = vmlaq_lane_f32(_out20, _v, vget_low_f32(_k2), 0);

                    float32x4_t _out21 = vmulq_lane_f32(_v, vget_low_f32(_k2), 1);

                    // ext
                    float32x4_t _zero_out21 = vdupq_n_f32(0.f);
                    _zero_out21 = vextq_f32(_zero_out21, _out21, 3);

                    _out20 = vaddq_f32(_out20, _zero_out21);

                    //
                    float32x2_t _out20low = vget_low_f32(_out20);
                    float32x2_t _out20high = vget_high_f32(_out20);
                    _out20high = vmla_lane_f32(_out20high, vget_low_f32(_v), vget_high_f32(_k2), 0);

                    _out20 = vcombine_f32(_out20low, _out20high);

                    vst1q_f32(outptr2 + 0, _out20);

                    //
                    float32x2_t _out22high = vld1_f32(outptr2 + 4);
                    float32x2_t _out21_zero = vext_f32(vget_high_f32(_out21), vget_low_f32(_zero_out21), 1);
                    _out22high = vadd_f32(_out22high, _out21_zero);
                    _out22high = vmla_lane_f32(_out22high, vget_high_f32(_v), vget_high_f32(_k2), 0);

                    vst1_f32(outptr2 + 4, _out22high);
#else
                    // active path: three overlapping load/mla/store sequences per
                    // output row; the overlap (offsets +0, +1, +2) implements the
                    // stride-1 scatter of a 3-tap kernel row.  Stores and the
                    // following load at +1/+2 intentionally alias.
                    //
                    float32x4_t _out00 = vld1q_f32(outptr0 + 0);
                    _out00 = vmlaq_lane_f32(_out00, _v, vget_low_f32(_k0), 0);
                    vst1q_f32(outptr0 + 0, _out00);

                    float32x4_t _out01 = vld1q_f32(outptr0 + 1);
                    _out01 = vmlaq_lane_f32(_out01, _v, vget_low_f32(_k0), 1);
                    vst1q_f32(outptr0 + 1, _out01);

                    float32x4_t _out02 = vld1q_f32(outptr0 + 2);
                    _out02 = vmlaq_lane_f32(_out02, _v, vget_high_f32(_k0), 0);
                    vst1q_f32(outptr0 + 2, _out02);

                    //
                    float32x4_t _out10 = vld1q_f32(outptr1 + 0);
                    _out10 = vmlaq_lane_f32(_out10, _v, vget_low_f32(_k1), 0);
                    vst1q_f32(outptr1 + 0, _out10);

                    float32x4_t _out11 = vld1q_f32(outptr1 + 1);
                    _out11 = vmlaq_lane_f32(_out11, _v, vget_low_f32(_k1), 1);
                    vst1q_f32(outptr1 + 1, _out11);

                    float32x4_t _out12 = vld1q_f32(outptr1 + 2);
                    _out12 = vmlaq_lane_f32(_out12, _v, vget_high_f32(_k1), 0);
                    vst1q_f32(outptr1 + 2, _out12);

                    //
                    float32x4_t _out20 = vld1q_f32(outptr2 + 0);
                    _out20 = vmlaq_lane_f32(_out20, _v, vget_low_f32(_k2), 0);
                    vst1q_f32(outptr2 + 0, _out20);

                    float32x4_t _out21 = vld1q_f32(outptr2 + 1);
                    _out21 = vmlaq_lane_f32(_out21, _v, vget_low_f32(_k2), 1);
                    vst1q_f32(outptr2 + 1, _out21);

                    float32x4_t _out22 = vld1q_f32(outptr2 + 2);
                    _out22 = vmlaq_lane_f32(_out22, _v, vget_high_f32(_k2), 0);
                    vst1q_f32(outptr2 + 2, _out22);
#endif
                    r0 += 4;
                    outptr0 += 4;
                    outptr1 += 4;
                    outptr2 += 4;
                }
#endif // __ARM_NEON
                // scalar tail: defines the reference semantics of the kernel
                for (; j < w; j++)
                {
                    float val = r0[0];

                    outptr0[0] += val * k0[0];
                    outptr0[1] += val * k0[1];
                    outptr0[2] += val * k0[2];

                    outptr1[0] += val * k1[0];
                    outptr1[1] += val * k1[1];
                    outptr1[2] += val * k1[2];

                    outptr2[0] += val * k2[0];
                    outptr2[1] += val * k2[1];
                    outptr2[2] += val * k2[2];

                    r0++;
                    outptr0++;
                    outptr1++;
                    outptr2++;
                }
            }
        }
    }
}

// 3x3 stride-2 deconvolution (transposed convolution), optionally NEON-accelerated.
// Same scatter-accumulate scheme as deconv3x3s1_neon, but each input pixel maps
// to output position (2*i, 2*j); the NEON path therefore uses de-interleaving
// vld2q/vst2q accesses to update even/odd output columns separately.
static void deconv3x3s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        for (int q=0; q<inch; q++)
        {
            const float* img0 = bottom_blob.channel(q);

            const float* kernel0 = kernel + p*inch*9 + q*9;

            const float* r0 = img0;

            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

#if __ARM_NEON
            // 4th lane of each loaded row is unused (see s1 variant)
            float32x4_t _k0 = vld1q_f32(k0);
            float32x4_t _k1 = vld1q_f32(k1);
            float32x4_t _k2 = vld1q_f32(k2);
#endif // __ARM_NEON

            for (int i = 0; i < h; i++)
            {
                // stride 2: input row i anchors at output row 2*i
                float* outptr = out.data + outw * i*2;

                float* outptr0 = outptr;
                float* outptr1 = outptr0 + outw;
                float* outptr2 = outptr1 + outw;

                int j = 0;
#if __ARM_NEON
                for (; j+3 < w; j+=4)
                {
                    float32x4_t _v = vld1q_f32(r0);

                    // out row 0
                    float32x4_t _out00 = vmulq_lane_f32(_v, vget_low_f32(_k0), 0);   // 0,2,4,6
                    float32x4_t _out01 = vmulq_lane_f32(_v, vget_low_f32(_k0), 1);   // 1,3,5,7
                    float32x4_t _out02 = vmulq_lane_f32(_v, vget_high_f32(_k0), 0);  // 2,4,6,8

                    float32x4x2_t _out0 = vld2q_f32(outptr0);
                    _out0.val[0] = vaddq_f32(_out0.val[0], _out00); // 0,2,4,6
                    _out0.val[1] = vaddq_f32(_out0.val[1], _out01); // 1,3,5,7
                    vst2q_f32(outptr0, _out0);

                    // re-load shifted by 2 so val[0] now covers columns 2,4,6,8
                    _out0 = vld2q_f32(outptr0 + 2);
                    _out0.val[0] = vaddq_f32(_out0.val[0], _out02); // 2,4,6,8
                    vst2q_f32(outptr0 + 2, _out0);

                    // out row 1
                    float32x4_t _out10 = vmulq_lane_f32(_v, vget_low_f32(_k1), 0);   // 0,2,4,6
                    float32x4_t _out11 = vmulq_lane_f32(_v, vget_low_f32(_k1), 1);   // 1,3,5,7
                    float32x4_t _out12 = vmulq_lane_f32(_v, vget_high_f32(_k1), 0);  // 2,4,6,8

                    float32x4x2_t _out1 = vld2q_f32(outptr1);
                    _out1.val[0] = vaddq_f32(_out1.val[0], _out10); // 0,2,4,6
                    _out1.val[1] = vaddq_f32(_out1.val[1], _out11); // 1,3,5,7
                    vst2q_f32(outptr1, _out1);

                    _out1 = vld2q_f32(outptr1 + 2);
                    _out1.val[0] = vaddq_f32(_out1.val[0], _out12); // 2,4,6,8
                    vst2q_f32(outptr1 + 2, _out1);

                    // out row 2
                    float32x4_t _out20 = vmulq_lane_f32(_v, vget_low_f32(_k2), 0);   // 0,2,4,6
                    float32x4_t _out21 = vmulq_lane_f32(_v, vget_low_f32(_k2), 1);   // 1,3,5,7
                    float32x4_t _out22 = vmulq_lane_f32(_v, vget_high_f32(_k2), 0);  // 2,4,6,8

                    float32x4x2_t _out2 = vld2q_f32(outptr2);
                    _out2.val[0] = vaddq_f32(_out2.val[0], _out20); // 0,2,4,6
                    _out2.val[1] = vaddq_f32(_out2.val[1], _out21); // 1,3,5,7
                    vst2q_f32(outptr2, _out2);

                    _out2 = vld2q_f32(outptr2 + 2);
                    _out2.val[0] = vaddq_f32(_out2.val[0], _out22); // 2,4,6,8
                    vst2q_f32(outptr2 + 2, _out2);

                    r0 += 4;
                    outptr0 += 8;
                    outptr1 += 8;
                    outptr2 += 8;
                }
#endif // __ARM_NEON
                // scalar tail: reference semantics (output advances 2 per input pixel)
                for (; j < w; j++)
                {
                    float val = r0[0];

                    outptr0[0] += val * k0[0];
                    outptr0[1] += val * k0[1];
                    outptr0[2] += val * k0[2];

                    outptr1[0] += val * k1[0];
                    outptr1[1] += val * k1[1];
                    outptr1[2] += val * k1[2];

                    outptr2[0] += val * k2[0];
                    outptr2[1] += val * k2[1];
                    outptr2[2] += val * k2[2];

                    r0++;
                    outptr0 += 2;
                    outptr1 += 2;
                    outptr2 += 2;
                }
            }
        }
    }
}
gems2.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/***************************************************************/

#include <omp.h>

/*
 * Allocate one zero-initialized buffer of `bytes` bytes per OpenMP thread.
 * Each thread allocates (and thereby first-touches) its own slot, so on
 * NUMA systems the memory lands near the thread that will use it.
 *
 * Returns an array of omp_get_max_threads() pointers, or NULL if the
 * pointer array itself could not be allocated.  A slot is NULL if that
 * thread's buffer allocation failed (or if the thread never joined the
 * team); callers should check ptrs[me] before use.
 * Release the result with ompx_free().
 */
void ** ompx_calloc(size_t bytes)
{
    int np = omp_get_max_threads();

    /* calloc, not malloc: if the parallel team ends up smaller than np
     * (e.g. dynamic thread adjustment), the untouched slots stay NULL,
     * so ompx_free() never frees an uninitialized pointer. */
    void ** ptrs = calloc((size_t)np, sizeof(void*));
    if (ptrs == NULL)
        return NULL;

    #pragma omp parallel shared(ptrs)
    {
        int me = omp_get_thread_num();
        /* calloc zeroes and first-touches the buffer in one step,
         * replacing the original malloc + memset pair. */
        ptrs[me] = calloc(1, bytes);
    }
    return ptrs;
}

/*
 * Free an array of per-thread buffers obtained from ompx_calloc().
 * Safe to call with NULL.  Frees all np slots serially so that a team
 * that is smaller at free time than at allocation time cannot leak any
 * buffer (free() may run on any thread; NULL slots are no-ops).
 * NOTE(review): assumes omp_get_max_threads() has not shrunk since the
 * matching ompx_calloc() call — the original code made the same implicit
 * assumption through its parallel-team indexing.
 */
void ompx_free(void ** ptrs)
{
    if (ptrs == NULL)
        return;

    int np = omp_get_max_threads();
    for (int i = 0; i < np; i++)
        free(ptrs[i]);
    free(ptrs);
}

int main(int argc, char* argv[])
{
    /* Element count: first CLI argument, defaulting to 2^20. */
    int n = (argc > 1) ? atoi(argv[1]) : 1 << 20;
    if (n <= 0) {
        /* Guard: a negative atoi() result would otherwise convert to a
         * huge size_t in the allocation below. */
        fprintf(stderr, "invalid element count\n");
        return EXIT_FAILURE;
    }

    int np = omp_get_max_threads();
    if (np < 2)
        exit(1);   /* this demo requires at least two threads */

    int ** A = (int**)ompx_calloc((size_t)n * sizeof(int));
    if (A == NULL) {
        fprintf(stderr, "allocation failed\n");
        return EXIT_FAILURE;
    }

    #pragma omp parallel shared(A)
    {
        /* threaded computation */
    }

    ompx_free((void**)A);
    return 0;
}

/***************************************************************/
Stmt.h
//===- Stmt.h - Classes for representing statements -------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <algorithm> #include <cassert> #include <cstddef> #include <iterator> #include <string> namespace llvm { class FoldingSetNodeID; } // namespace llvm namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class AddrLabelExpr; class LabelDecl; class ODRHash; class PrinterHelper; struct PrintingPolicy; class RecordDecl; class SourceManager; class StringLiteral; class Token; class VarDecl; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. 
/// class alignas(void *) Stmt { public: enum StmtClass { NoStmtClass = 0, #define STMT(CLASS, PARENT) CLASS##Class, #define STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class, #define LAST_STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class #define ABSTRACT_STMT(STMT) #include "clang/AST/StmtNodes.inc" }; // Make vanilla 'new' and 'delete' illegal for Stmts. protected: friend class ASTStmtReader; friend class ASTStmtWriter; void *operator new(size_t bytes) noexcept { llvm_unreachable("Stmts cannot be allocated with regular 'new'."); } void operator delete(void *data) noexcept { llvm_unreachable("Stmts cannot be released with regular 'delete'."); } //===--- Statement bitfields classes ---===// class StmtBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class Stmt; /// The statement class. unsigned sClass : 8; /// This bit is set only for the Stmts that are the structured-block of /// OpenMP executable directives. Directives that have a structured block /// are called "non-standalone" directives. /// I.e. those returned by OMPExecutableDirective::getStructuredBlock(). unsigned IsOMPStructuredBlock : 1; }; enum { NumStmtBits = 9 }; class NullStmtBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class NullStmt; unsigned : NumStmtBits; /// True if the null statement was preceded by an empty macro, e.g: /// @code /// #define CALL(x) /// CALL(0); /// @endcode unsigned HasLeadingEmptyMacro : 1; /// The location of the semi-colon. SourceLocation SemiLoc; }; class CompoundStmtBitfields { friend class ASTStmtReader; friend class CompoundStmt; unsigned : NumStmtBits; unsigned NumStmts : 32 - NumStmtBits; /// The location of the opening "{". 
SourceLocation LBraceLoc; }; class LabelStmtBitfields { friend class LabelStmt; unsigned : NumStmtBits; SourceLocation IdentLoc; }; class AttributedStmtBitfields { friend class ASTStmtReader; friend class AttributedStmt; unsigned : NumStmtBits; /// Number of attributes. unsigned NumAttrs : 32 - NumStmtBits; /// The location of the attribute. SourceLocation AttrLoc; }; class IfStmtBitfields { friend class ASTStmtReader; friend class IfStmt; unsigned : NumStmtBits; /// True if this if statement is a constexpr if. unsigned IsConstexpr : 1; /// True if this if statement has storage for an else statement. unsigned HasElse : 1; /// True if this if statement has storage for a variable declaration. unsigned HasVar : 1; /// True if this if statement has storage for an init statement. unsigned HasInit : 1; /// The location of the "if". SourceLocation IfLoc; }; class SwitchStmtBitfields { friend class SwitchStmt; unsigned : NumStmtBits; /// True if the SwitchStmt has storage for an init statement. unsigned HasInit : 1; /// True if the SwitchStmt has storage for a condition variable. unsigned HasVar : 1; /// If the SwitchStmt is a switch on an enum value, records whether all /// the enum values were covered by CaseStmts. The coverage information /// value is meant to be a hint for possible clients. unsigned AllEnumCasesCovered : 1; /// The location of the "switch". SourceLocation SwitchLoc; }; class WhileStmtBitfields { friend class ASTStmtReader; friend class WhileStmt; unsigned : NumStmtBits; /// True if the WhileStmt has storage for a condition variable. unsigned HasVar : 1; /// The location of the "while". SourceLocation WhileLoc; }; class DoStmtBitfields { friend class DoStmt; unsigned : NumStmtBits; /// The location of the "do". SourceLocation DoLoc; }; class ForStmtBitfields { friend class ForStmt; unsigned : NumStmtBits; /// The location of the "for". 
SourceLocation ForLoc; }; class GotoStmtBitfields { friend class GotoStmt; friend class IndirectGotoStmt; unsigned : NumStmtBits; /// The location of the "goto". SourceLocation GotoLoc; }; class ContinueStmtBitfields { friend class ContinueStmt; unsigned : NumStmtBits; /// The location of the "continue". SourceLocation ContinueLoc; }; class BreakStmtBitfields { friend class BreakStmt; unsigned : NumStmtBits; /// The location of the "break". SourceLocation BreakLoc; }; class ReturnStmtBitfields { friend class ReturnStmt; unsigned : NumStmtBits; /// True if this ReturnStmt has storage for an NRVO candidate. unsigned HasNRVOCandidate : 1; /// The location of the "return". SourceLocation RetLoc; }; class SwitchCaseBitfields { friend class SwitchCase; friend class CaseStmt; unsigned : NumStmtBits; /// Used by CaseStmt to store whether it is a case statement /// of the form case LHS ... RHS (a GNU extension). unsigned CaseStmtIsGNURange : 1; /// The location of the "case" or "default" keyword. 
SourceLocation KeywordLoc; }; //===--- Expression bitfields classes ---===// class ExprBitfields { friend class ASTStmtReader; // deserialization friend class AtomicExpr; // ctor friend class BlockDeclRefExpr; // ctor friend class CallExpr; // ctor friend class CXXConstructExpr; // ctor friend class CXXDependentScopeMemberExpr; // ctor friend class CXXNewExpr; // ctor friend class CXXUnresolvedConstructExpr; // ctor friend class DeclRefExpr; // computeDependence friend class DependentScopeDeclRefExpr; // ctor friend class DesignatedInitExpr; // ctor friend class Expr; friend class InitListExpr; // ctor friend class ObjCArrayLiteral; // ctor friend class ObjCDictionaryLiteral; // ctor friend class ObjCMessageExpr; // ctor friend class OffsetOfExpr; // ctor friend class OpaqueValueExpr; // ctor friend class OverloadExpr; // ctor friend class ParenListExpr; // ctor friend class PseudoObjectExpr; // ctor friend class ShuffleVectorExpr; // ctor unsigned : NumStmtBits; unsigned ValueKind : 2; unsigned ObjectKind : 3; unsigned TypeDependent : 1; unsigned ValueDependent : 1; unsigned InstantiationDependent : 1; unsigned ContainsUnexpandedParameterPack : 1; }; enum { NumExprBits = NumStmtBits + 9 }; class ConstantExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class ConstantExpr; unsigned : NumExprBits; /// The kind of result that is trail-allocated. unsigned ResultKind : 2; /// Kind of Result as defined by APValue::Kind unsigned APValueKind : 4; /// When ResultKind == RSK_Int64. whether the trail-allocated integer is /// signed. unsigned IsUnsigned : 1; /// When ResultKind == RSK_Int64. the BitWidth of the trail-allocated /// integer. 7 bits because it is the minimal number of bit to represent a /// value from 0 to 64 (the size of the trail-allocated number). unsigned BitWidth : 7; /// When ResultKind == RSK_APValue. Wether the ASTContext will cleanup the /// destructor on the trail-allocated APValue. 
unsigned HasCleanup : 1; }; class PredefinedExprBitfields { friend class ASTStmtReader; friend class PredefinedExpr; unsigned : NumExprBits; /// The kind of this PredefinedExpr. One of the enumeration values /// in PredefinedExpr::IdentKind. unsigned Kind : 4; /// True if this PredefinedExpr has a trailing "StringLiteral *" /// for the predefined identifier. unsigned HasFunctionName : 1; /// The location of this PredefinedExpr. SourceLocation Loc; }; class DeclRefExprBitfields { friend class ASTStmtReader; // deserialization friend class DeclRefExpr; unsigned : NumExprBits; unsigned HasQualifier : 1; unsigned HasTemplateKWAndArgsInfo : 1; unsigned HasFoundDecl : 1; unsigned HadMultipleCandidates : 1; unsigned RefersToEnclosingVariableOrCapture : 1; unsigned NonOdrUseReason : 2; /// The location of the declaration name itself. SourceLocation Loc; }; class FloatingLiteralBitfields { friend class FloatingLiteral; unsigned : NumExprBits; unsigned Semantics : 3; // Provides semantics for APFloat construction unsigned IsExact : 1; }; class StringLiteralBitfields { friend class ASTStmtReader; friend class StringLiteral; unsigned : NumExprBits; /// The kind of this string literal. /// One of the enumeration values of StringLiteral::StringKind. unsigned Kind : 3; /// The width of a single character in bytes. Only values of 1, 2, /// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps /// the target + string kind to the appropriate CharByteWidth. unsigned CharByteWidth : 3; unsigned IsPascal : 1; /// The number of concatenated token this string is made of. /// This is the number of trailing SourceLocation. 
unsigned NumConcatenated; }; class CharacterLiteralBitfields { friend class CharacterLiteral; unsigned : NumExprBits; unsigned Kind : 3; }; class UnaryOperatorBitfields { friend class UnaryOperator; unsigned : NumExprBits; unsigned Opc : 5; unsigned CanOverflow : 1; SourceLocation Loc; }; class UnaryExprOrTypeTraitExprBitfields { friend class UnaryExprOrTypeTraitExpr; unsigned : NumExprBits; unsigned Kind : 3; unsigned IsType : 1; // true if operand is a type, false if an expression. }; class ArraySubscriptExprBitfields { friend class ArraySubscriptExpr; unsigned : NumExprBits; SourceLocation RBracketLoc; }; class CallExprBitfields { friend class CallExpr; unsigned : NumExprBits; unsigned NumPreArgs : 1; /// True if the callee of the call expression was found using ADL. unsigned UsesADL : 1; /// Padding used to align OffsetToTrailingObjects to a byte multiple. unsigned : 24 - 2 - NumExprBits; /// The offset in bytes from the this pointer to the start of the /// trailing objects belonging to CallExpr. Intentionally byte sized /// for faster access. unsigned OffsetToTrailingObjects : 8; }; enum { NumCallExprBits = 32 }; class MemberExprBitfields { friend class ASTStmtReader; friend class MemberExpr; unsigned : NumExprBits; /// IsArrow - True if this is "X->F", false if this is "X.F". unsigned IsArrow : 1; /// True if this member expression used a nested-name-specifier to /// refer to the member, e.g., "x->Base::f", or found its member via /// a using declaration. When true, a MemberExprNameQualifier /// structure is allocated immediately after the MemberExpr. unsigned HasQualifierOrFoundDecl : 1; /// True if this member expression specified a template keyword /// and/or a template argument list explicitly, e.g., x->f<int>, /// x->template f, x->template f<int>. /// When true, an ASTTemplateKWAndArgsInfo structure and its /// TemplateArguments (if any) are present. 
unsigned HasTemplateKWAndArgsInfo : 1; /// True if this member expression refers to a method that /// was resolved from an overloaded set having size greater than 1. unsigned HadMultipleCandidates : 1; /// Value of type NonOdrUseReason indicating why this MemberExpr does /// not constitute an odr-use of the named declaration. Meaningful only /// when naming a static member. unsigned NonOdrUseReason : 2; /// This is the location of the -> or . in the expression. SourceLocation OperatorLoc; }; class CastExprBitfields { friend class CastExpr; friend class ImplicitCastExpr; unsigned : NumExprBits; unsigned Kind : 6; unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr. /// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough /// here. ([implimits] Direct and indirect base classes [16384]). unsigned BasePathSize; }; class BinaryOperatorBitfields { friend class BinaryOperator; unsigned : NumExprBits; unsigned Opc : 6; /// This is only meaningful for operations on floating point /// types and 0 otherwise. unsigned FPFeatures : 3; SourceLocation OpLoc; }; class InitListExprBitfields { friend class InitListExpr; unsigned : NumExprBits; /// Whether this initializer list originally had a GNU array-range /// designator in it. This is a temporary marker used by CodeGen. unsigned HadArrayRangeDesignator : 1; }; class ParenListExprBitfields { friend class ASTStmtReader; friend class ParenListExpr; unsigned : NumExprBits; /// The number of expressions in the paren list. unsigned NumExprs; }; class GenericSelectionExprBitfields { friend class ASTStmtReader; friend class GenericSelectionExpr; unsigned : NumExprBits; /// The location of the "_Generic". SourceLocation GenericLoc; }; class PseudoObjectExprBitfields { friend class ASTStmtReader; // deserialization friend class PseudoObjectExpr; unsigned : NumExprBits; // These don't need to be particularly wide, because they're // strictly limited by the forms of expressions we permit. 
unsigned NumSubExprs : 8; unsigned ResultIndex : 32 - 8 - NumExprBits; }; class SourceLocExprBitfields { friend class ASTStmtReader; friend class SourceLocExpr; unsigned : NumExprBits; /// The kind of source location builtin represented by the SourceLocExpr. /// Ex. __builtin_LINE, __builtin_FUNCTION, ect. unsigned Kind : 2; }; //===--- C++ Expression bitfields classes ---===// class CXXOperatorCallExprBitfields { friend class ASTStmtReader; friend class CXXOperatorCallExpr; unsigned : NumCallExprBits; /// The kind of this overloaded operator. One of the enumerator /// value of OverloadedOperatorKind. unsigned OperatorKind : 6; // Only meaningful for floating point types. unsigned FPFeatures : 3; }; class CXXRewrittenBinaryOperatorBitfields { friend class ASTStmtReader; friend class CXXRewrittenBinaryOperator; unsigned : NumCallExprBits; unsigned IsReversed : 1; }; class CXXBoolLiteralExprBitfields { friend class CXXBoolLiteralExpr; unsigned : NumExprBits; /// The value of the boolean literal. unsigned Value : 1; /// The location of the boolean literal. SourceLocation Loc; }; class CXXNullPtrLiteralExprBitfields { friend class CXXNullPtrLiteralExpr; unsigned : NumExprBits; /// The location of the null pointer literal. SourceLocation Loc; }; class CXXThisExprBitfields { friend class CXXThisExpr; unsigned : NumExprBits; /// Whether this is an implicit "this". unsigned IsImplicit : 1; /// The location of the "this". SourceLocation Loc; }; class CXXThrowExprBitfields { friend class ASTStmtReader; friend class CXXThrowExpr; unsigned : NumExprBits; /// Whether the thrown variable (if any) is in scope. unsigned IsThrownVariableInScope : 1; /// The location of the "throw". SourceLocation ThrowLoc; }; class CXXDefaultArgExprBitfields { friend class ASTStmtReader; friend class CXXDefaultArgExpr; unsigned : NumExprBits; /// The location where the default argument expression was used. 
SourceLocation Loc; }; class CXXDefaultInitExprBitfields { friend class ASTStmtReader; friend class CXXDefaultInitExpr; unsigned : NumExprBits; /// The location where the default initializer expression was used. SourceLocation Loc; }; class CXXScalarValueInitExprBitfields { friend class ASTStmtReader; friend class CXXScalarValueInitExpr; unsigned : NumExprBits; SourceLocation RParenLoc; }; class CXXNewExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class CXXNewExpr; unsigned : NumExprBits; /// Was the usage ::new, i.e. is the global new to be used? unsigned IsGlobalNew : 1; /// Do we allocate an array? If so, the first trailing "Stmt *" is the /// size expression. unsigned IsArray : 1; /// Should the alignment be passed to the allocation function? unsigned ShouldPassAlignment : 1; /// If this is an array allocation, does the usual deallocation /// function for the allocated type want to know the allocated size? unsigned UsualArrayDeleteWantsSize : 1; /// What kind of initializer do we have? Could be none, parens, or braces. /// In storage, we distinguish between "none, and no initializer expr", and /// "none, but an implicit initializer expr". unsigned StoredInitializationStyle : 2; /// True if the allocated type was expressed as a parenthesized type-id. unsigned IsParenTypeId : 1; /// The number of placement new arguments. unsigned NumPlacementArgs; }; class CXXDeleteExprBitfields { friend class ASTStmtReader; friend class CXXDeleteExpr; unsigned : NumExprBits; /// Is this a forced global delete, i.e. "::delete"? unsigned GlobalDelete : 1; /// Is this the array form of delete, i.e. "delete[]"? unsigned ArrayForm : 1; /// ArrayFormAsWritten can be different from ArrayForm if 'delete' is /// applied to pointer-to-array type (ArrayFormAsWritten will be false /// while ArrayForm will be true). unsigned ArrayFormAsWritten : 1; /// Does the usual deallocation function for the element type require /// a size_t argument? 
unsigned UsualArrayDeleteWantsSize : 1; /// Location of the expression. SourceLocation Loc; }; class TypeTraitExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class TypeTraitExpr; unsigned : NumExprBits; /// The kind of type trait, which is a value of a TypeTrait enumerator. unsigned Kind : 8; /// If this expression is not value-dependent, this indicates whether /// the trait evaluated true or false. unsigned Value : 1; /// The number of arguments to this type trait. unsigned NumArgs : 32 - 8 - 1 - NumExprBits; }; class DependentScopeDeclRefExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class DependentScopeDeclRefExpr; unsigned : NumExprBits; /// Whether the name includes info for explicit template /// keyword and arguments. unsigned HasTemplateKWAndArgsInfo : 1; }; class CXXConstructExprBitfields { friend class ASTStmtReader; friend class CXXConstructExpr; unsigned : NumExprBits; unsigned Elidable : 1; unsigned HadMultipleCandidates : 1; unsigned ListInitialization : 1; unsigned StdInitListInitialization : 1; unsigned ZeroInitialization : 1; unsigned ConstructionKind : 3; SourceLocation Loc; }; class ExprWithCleanupsBitfields { friend class ASTStmtReader; // deserialization friend class ExprWithCleanups; unsigned : NumExprBits; // When false, it must not have side effects. unsigned CleanupsHaveSideEffects : 1; unsigned NumObjects : 32 - 1 - NumExprBits; }; class CXXUnresolvedConstructExprBitfields { friend class ASTStmtReader; friend class CXXUnresolvedConstructExpr; unsigned : NumExprBits; /// The number of arguments used to construct the type. unsigned NumArgs; }; class CXXDependentScopeMemberExprBitfields { friend class ASTStmtReader; friend class CXXDependentScopeMemberExpr; unsigned : NumExprBits; /// Whether this member expression used the '->' operator or /// the '.' operator. unsigned IsArrow : 1; /// Whether this member expression has info for explicit template /// keyword and arguments. 
unsigned HasTemplateKWAndArgsInfo : 1; /// See getFirstQualifierFoundInScope() and the comment listing /// the trailing objects. unsigned HasFirstQualifierFoundInScope : 1; /// The location of the '->' or '.' operator. SourceLocation OperatorLoc; }; class OverloadExprBitfields { friend class ASTStmtReader; friend class OverloadExpr; unsigned : NumExprBits; /// Whether the name includes info for explicit template /// keyword and arguments. unsigned HasTemplateKWAndArgsInfo : 1; /// Padding used by the derived classes to store various bits. If you /// need to add some data here, shrink this padding and add your data /// above. NumOverloadExprBits also needs to be updated. unsigned : 32 - NumExprBits - 1; /// The number of results. unsigned NumResults; }; enum { NumOverloadExprBits = NumExprBits + 1 }; class UnresolvedLookupExprBitfields { friend class ASTStmtReader; friend class UnresolvedLookupExpr; unsigned : NumOverloadExprBits; /// True if these lookup results should be extended by /// argument-dependent lookup if this is the operand of a function call. unsigned RequiresADL : 1; /// True if these lookup results are overloaded. This is pretty trivially /// rederivable if we urgently need to kill this field. unsigned Overloaded : 1; }; static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4, "UnresolvedLookupExprBitfields must be <= than 4 bytes to" "avoid trashing OverloadExprBitfields::NumResults!"); class UnresolvedMemberExprBitfields { friend class ASTStmtReader; friend class UnresolvedMemberExpr; unsigned : NumOverloadExprBits; /// Whether this member expression used the '->' operator or /// the '.' operator. unsigned IsArrow : 1; /// Whether the lookup results contain an unresolved using declaration. 
unsigned HasUnresolvedUsing : 1; }; static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4, "UnresolvedMemberExprBitfields must be <= than 4 bytes to" "avoid trashing OverloadExprBitfields::NumResults!"); class CXXNoexceptExprBitfields { friend class ASTStmtReader; friend class CXXNoexceptExpr; unsigned : NumExprBits; unsigned Value : 1; }; class SubstNonTypeTemplateParmExprBitfields { friend class ASTStmtReader; friend class SubstNonTypeTemplateParmExpr; unsigned : NumExprBits; /// The location of the non-type template parameter reference. SourceLocation NameLoc; }; class RequiresExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class RequiresExpr; unsigned : NumExprBits; unsigned IsSatisfied : 1; SourceLocation RequiresKWLoc; }; //===--- C++ Coroutines TS bitfields classes ---===// class CoawaitExprBitfields { friend class CoawaitExpr; unsigned : NumExprBits; unsigned IsImplicit : 1; }; //===--- Obj-C Expression bitfields classes ---===// class ObjCIndirectCopyRestoreExprBitfields { friend class ObjCIndirectCopyRestoreExpr; unsigned : NumExprBits; unsigned ShouldCopy : 1; }; //===--- Clang Extensions bitfields classes ---===// class OpaqueValueExprBitfields { friend class ASTStmtReader; friend class OpaqueValueExpr; unsigned : NumExprBits; /// The OVE is a unique semantic reference to its source expression if this /// bit is set to true. unsigned IsUnique : 1; SourceLocation Loc; }; union { // Same order as in StmtNodes.td. 
// Statements StmtBitfields StmtBits; NullStmtBitfields NullStmtBits; CompoundStmtBitfields CompoundStmtBits; LabelStmtBitfields LabelStmtBits; AttributedStmtBitfields AttributedStmtBits; IfStmtBitfields IfStmtBits; SwitchStmtBitfields SwitchStmtBits; WhileStmtBitfields WhileStmtBits; DoStmtBitfields DoStmtBits; ForStmtBitfields ForStmtBits; GotoStmtBitfields GotoStmtBits; ContinueStmtBitfields ContinueStmtBits; BreakStmtBitfields BreakStmtBits; ReturnStmtBitfields ReturnStmtBits; SwitchCaseBitfields SwitchCaseBits; // Expressions ExprBitfields ExprBits; ConstantExprBitfields ConstantExprBits; PredefinedExprBitfields PredefinedExprBits; DeclRefExprBitfields DeclRefExprBits; FloatingLiteralBitfields FloatingLiteralBits; StringLiteralBitfields StringLiteralBits; CharacterLiteralBitfields CharacterLiteralBits; UnaryOperatorBitfields UnaryOperatorBits; UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits; ArraySubscriptExprBitfields ArraySubscriptExprBits; CallExprBitfields CallExprBits; MemberExprBitfields MemberExprBits; CastExprBitfields CastExprBits; BinaryOperatorBitfields BinaryOperatorBits; InitListExprBitfields InitListExprBits; ParenListExprBitfields ParenListExprBits; GenericSelectionExprBitfields GenericSelectionExprBits; PseudoObjectExprBitfields PseudoObjectExprBits; SourceLocExprBitfields SourceLocExprBits; // C++ Expressions CXXOperatorCallExprBitfields CXXOperatorCallExprBits; CXXRewrittenBinaryOperatorBitfields CXXRewrittenBinaryOperatorBits; CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits; CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits; CXXThisExprBitfields CXXThisExprBits; CXXThrowExprBitfields CXXThrowExprBits; CXXDefaultArgExprBitfields CXXDefaultArgExprBits; CXXDefaultInitExprBitfields CXXDefaultInitExprBits; CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits; CXXNewExprBitfields CXXNewExprBits; CXXDeleteExprBitfields CXXDeleteExprBits; TypeTraitExprBitfields TypeTraitExprBits; DependentScopeDeclRefExprBitfields 
DependentScopeDeclRefExprBits; CXXConstructExprBitfields CXXConstructExprBits; ExprWithCleanupsBitfields ExprWithCleanupsBits; CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits; CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits; OverloadExprBitfields OverloadExprBits; UnresolvedLookupExprBitfields UnresolvedLookupExprBits; UnresolvedMemberExprBitfields UnresolvedMemberExprBits; CXXNoexceptExprBitfields CXXNoexceptExprBits; SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits; RequiresExprBitfields RequiresExprBits; // C++ Coroutines TS expressions CoawaitExprBitfields CoawaitBits; // Obj-C Expressions ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits; // Clang Extensions OpaqueValueExprBitfields OpaqueValueExprBits; }; public: // Only allow allocation of Stmts using the allocator in ASTContext // or by doing a placement new. void* operator new(size_t bytes, const ASTContext& C, unsigned alignment = 8); void* operator new(size_t bytes, const ASTContext* C, unsigned alignment = 8) { return operator new(bytes, *C, alignment); } void *operator new(size_t bytes, void *mem) noexcept { return mem; } void operator delete(void *, const ASTContext &, unsigned) noexcept {} void operator delete(void *, const ASTContext *, unsigned) noexcept {} void operator delete(void *, size_t) noexcept {} void operator delete(void *, void *) noexcept {} public: /// A placeholder type used to construct an empty shell of a /// type, that will be filled in later (e.g., by some /// de-serialization). struct EmptyShell {}; protected: /// Iterator for iterating over Stmt * arrays that contain only T *. /// /// This is needed because AST nodes use Stmt* arrays to store /// references to children (to be compatible with StmtIterator). 
template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *> struct CastIterator : llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *, std::random_access_iterator_tag, TPtr> { using Base = typename CastIterator::iterator_adaptor_base; CastIterator() : Base(nullptr) {} CastIterator(StmtPtr *I) : Base(I) {} typename Base::value_type operator*() const { return cast_or_null<T>(*this->I); } }; /// Const iterator for iterating over Stmt * arrays that contain only T *. template <typename T> using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>; using ExprIterator = CastIterator<Expr>; using ConstExprIterator = ConstCastIterator<Expr>; private: /// Whether statistic collection is enabled. static bool StatisticsEnabled; protected: /// Construct an empty statement. explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {} public: Stmt() = delete; Stmt(const Stmt &) = delete; Stmt(Stmt &&) = delete; Stmt &operator=(const Stmt &) = delete; Stmt &operator=(Stmt &&) = delete; Stmt(StmtClass SC) { static_assert(sizeof(*this) <= 8, "changing bitfields changed sizeof(Stmt)"); static_assert(sizeof(*this) % alignof(void *) == 0, "Insufficient alignment!"); StmtBits.sClass = SC; StmtBits.IsOMPStructuredBlock = false; if (StatisticsEnabled) Stmt::addStmtClass(SC); } StmtClass getStmtClass() const { return static_cast<StmtClass>(StmtBits.sClass); } const char *getStmtClassName() const; bool isOMPStructuredBlock() const { return StmtBits.IsOMPStructuredBlock; } void setIsOMPStructuredBlock(bool IsOMPStructuredBlock) { StmtBits.IsOMPStructuredBlock = IsOMPStructuredBlock; } /// SourceLocation tokens are not useful in isolation - they are low level /// value objects created/interpreted by SourceManager. We assume AST /// clients will have a pointer to the respective SourceManager. 
SourceRange getSourceRange() const LLVM_READONLY; SourceLocation getBeginLoc() const LLVM_READONLY; SourceLocation getEndLoc() const LLVM_READONLY; // global temp stats (until we have a per-module visitor) static void addStmtClass(const StmtClass s); static void EnableStatistics(); static void PrintStats(); /// Dumps the specified AST fragment and all subtrees to /// \c llvm::errs(). void dump() const; void dump(SourceManager &SM) const; void dump(raw_ostream &OS, SourceManager &SM) const; void dump(raw_ostream &OS) const; /// \return Unique reproducible object identifier int64_t getID(const ASTContext &Context) const; /// dumpColor - same as dump(), but forces color highlighting. void dumpColor() const; /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST /// back to its original source language syntax. void dumpPretty(const ASTContext &Context) const; void printPretty(raw_ostream &OS, PrinterHelper *Helper, const PrintingPolicy &Policy, unsigned Indentation = 0, StringRef NewlineSymbol = "\n", const ASTContext *Context = nullptr) const; /// Pretty-prints in JSON format. void printJson(raw_ostream &Out, PrinterHelper *Helper, const PrintingPolicy &Policy, bool AddQuotes) const; /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only /// works on systems with GraphViz (Mac OS X) or dot+gv installed. void viewAST() const; /// Skip no-op (attributed, compound) container stmts and skip captured /// stmt at the top, if \a IgnoreCaptured is true. 
Stmt *IgnoreContainers(bool IgnoreCaptured = false); const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const { return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured); } const Stmt *stripLabelLikeStatements() const; Stmt *stripLabelLikeStatements() { return const_cast<Stmt*>( const_cast<const Stmt*>(this)->stripLabelLikeStatements()); } /// Child Iterators: All subclasses must implement 'children' /// to permit easy iteration over the substatements/subexpessions of an /// AST node. This permits easy iteration over all nodes in the AST. using child_iterator = StmtIterator; using const_child_iterator = ConstStmtIterator; using child_range = llvm::iterator_range<child_iterator>; using const_child_range = llvm::iterator_range<const_child_iterator>; child_range children(); const_child_range children() const { auto Children = const_cast<Stmt *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_iterator child_begin() { return children().begin(); } child_iterator child_end() { return children().end(); } const_child_iterator child_begin() const { return children().begin(); } const_child_iterator child_end() const { return children().end(); } /// Produce a unique representation of the given statement. /// /// \param ID once the profiling operation is complete, will contain /// the unique representation of the given statement. /// /// \param Context the AST context in which the statement resides /// /// \param Canonical whether the profile should be based on the canonical /// representation of this statement (e.g., where non-type template /// parameters are identified by index/level rather than their /// declaration pointers) or the exact representation of the statement as /// written in the source. void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, bool Canonical) const; /// Calculate a unique representation for a statement that is /// stable across compiler invocations. 
///
  /// \param ID profile information will be stored in ID.
  ///
  /// \param Hash an ODRHash object which will be called where pointers would
  /// have been used in the Profile function.
  void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};

/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
  // The group of declarations wrapped by this statement; may hold one Decl
  // or several (see isSingleDecl()).
  DeclGroupRef DG;
  SourceLocation StartLoc, EndLoc;

public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
      : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}

  /// Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}

  /// isSingleDecl - This method returns true if this DeclStmt refers
  /// to a single Decl.
  bool isSingleDecl() const { return DG.isSingleDecl(); }

  const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
  Decl *getSingleDecl() { return DG.getSingleDecl(); }

  const DeclGroupRef getDeclGroup() const { return DG; }
  DeclGroupRef getDeclGroup() { return DG; }
  void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }

  void setStartLoc(SourceLocation L) { StartLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DeclStmtClass;
  }

  // Iterators over subexpressions.
  child_range children() {
    return child_range(child_iterator(DG.begin(), DG.end()),
                       child_iterator(DG.end(), DG.end()));
  }

  const_child_range children() const {
    auto Children = const_cast<DeclStmt *>(this)->children();
    return const_child_range(Children);
  }

  // Iteration over the declarations themselves (as opposed to the child
  // statements/expressions above).
  using decl_iterator = DeclGroupRef::iterator;
  using const_decl_iterator = DeclGroupRef::const_iterator;
  using decl_range = llvm::iterator_range<decl_iterator>;
  using decl_const_range = llvm::iterator_range<const_decl_iterator>;

  decl_range decls() { return decl_range(decl_begin(), decl_end()); }

  decl_const_range decls() const {
    return decl_const_range(decl_begin(), decl_end());
  }

  decl_iterator decl_begin() { return DG.begin(); }
  decl_iterator decl_end() { return DG.end(); }
  const_decl_iterator decl_begin() const { return DG.begin(); }
  const_decl_iterator decl_end() const { return DG.end(); }

  using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;

  reverse_decl_iterator decl_rbegin() {
    return reverse_decl_iterator(decl_end());
  }

  reverse_decl_iterator decl_rend() {
    return reverse_decl_iterator(decl_begin());
  }
};

/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
public:
  // SemiLoc and HasLeadingEmptyMacro are stored in NullStmtBits
  // (bit-packed state in the Stmt base class), so there are no data
  // members here.
  NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
      : Stmt(NullStmtClass) {
    NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
    setSemiLoc(L);
  }

  /// Build an empty null statement.
explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}

  SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
  void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }

  bool hasLeadingEmptyMacro() const {
    return NullStmtBits.HasLeadingEmptyMacro;
  }

  // A null statement is a single token, so begin == end == the ';'.
  SourceLocation getBeginLoc() const { return getSemiLoc(); }
  SourceLocation getEndLoc() const { return getSemiLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == NullStmtClass;
  }

  // A NullStmt has no children: both ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final
    : public Stmt,
      private llvm::TrailingObjects<CompoundStmt, Stmt *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  /// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
  SourceLocation RBraceLoc;

  CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);

  explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}

  void setStmts(ArrayRef<Stmt *> Stmts);

public:
  static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
                              SourceLocation LB, SourceLocation RB);

  // Build an empty compound statement with a location.
  explicit CompoundStmt(SourceLocation Loc)
      : Stmt(CompoundStmtClass), RBraceLoc(Loc) {
    CompoundStmtBits.NumStmts = 0;
    CompoundStmtBits.LBraceLoc = Loc;
  }

  // Build an empty compound statement.
  static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);

  bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
  unsigned size() const { return CompoundStmtBits.NumStmts; }

  // The body statements are stored as trailing objects allocated
  // immediately after this object; body_begin() points at that array.
  using body_iterator = Stmt **;
  using body_range = llvm::iterator_range<body_iterator>;

  body_range body() { return body_range(body_begin(), body_end()); }
  body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
  body_iterator body_end() { return body_begin() + size(); }
  Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }

  Stmt *body_back() {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using const_body_iterator = Stmt *const *;
  using body_const_range = llvm::iterator_range<const_body_iterator>;

  body_const_range body() const {
    return body_const_range(body_begin(), body_end());
  }

  const_body_iterator body_begin() const {
    return getTrailingObjects<Stmt *>();
  }

  const_body_iterator body_end() const { return body_begin() + size(); }

  const Stmt *body_front() const {
    return !body_empty() ? body_begin()[0] : nullptr;
  }

  const Stmt *body_back() const {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using reverse_body_iterator = std::reverse_iterator<body_iterator>;

  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }

  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  using const_reverse_body_iterator =
      std::reverse_iterator<const_body_iterator>;

  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }

  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  // Get the Stmt that StmtExpr would consider to be the result of this
  // compound statement. This is used by StmtExpr to properly emulate the GCC
  // compound expression extension, which ignores trailing NullStmts when
  // getting the result of the expression.
  // i.e.
  //   ({ 5;;; })
  //           ^^ ignored
  // If we don't find something that isn't a NullStmt, just return the last
  // Stmt.
  Stmt *getStmtExprResult() {
    // Walk backwards and return the first non-null-statement; fall back to
    // the last statement if everything is a NullStmt.
    for (auto *B : llvm::reverse(body())) {
      if (!isa<NullStmt>(B))
        return B;
    }
    return body_back();
  }

  const Stmt *getStmtExprResult() const {
    return const_cast<CompoundStmt *>(this)->getStmtExprResult();
  }

  SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getEndLoc() const { return RBraceLoc; }

  SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getRBracLoc() const { return RBraceLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators
  child_range children() { return child_range(body_begin(), body_end()); }

  const_child_range children() const {
    return const_child_range(body_begin(), body_end());
  }
};

// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
  /// The location of the ":".
  SourceLocation ColonLoc;

  // The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
  // SourceLocation KeywordLoc;

  /// A pointer to the following CaseStmt or DefaultStmt class,
  /// used by SwitchStmt.
SwitchCase *NextSwitchCase = nullptr;

  SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
      : Stmt(SC), ColonLoc(ColonLoc) {
    setKeywordLoc(KWLoc);
  }

  SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
  SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
  void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }

  SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
  void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  // Defined inline after CaseStmt/DefaultStmt are complete; dispatches on
  // the dynamic type.
  inline Stmt *getSubStmt();
  const Stmt *getSubStmt() const {
    return const_cast<SwitchCase *>(this)->getSubStmt();
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  inline SourceLocation getEndLoc() const LLVM_READONLY;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass ||
           T->getStmtClass() == DefaultStmtClass;
  }
};

/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
    : public SwitchCase,
      private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // CaseStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing objects
  // at the end but this would impact children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the LHS of the case statement. Always present.
  //
  // * A "Stmt *" for the RHS of the case statement. This is a GNU extension
  //   which allow ranges in cases statement of the form LHS ... RHS.
  //   Present if and only if caseStmtIsGNURange() is true.
  //
  // * A "Stmt *" for the substatement of the case statement. Always present.
  //
  // * A SourceLocation for the location of the ... if this is a case statement
  //   with a range.
  //   Present if and only if caseStmtIsGNURange() is true.
  enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + caseStmtIsGNURange();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return caseStmtIsGNURange();
  }

  // Offsets into the trailing "Stmt *" array; the RHS slot exists only when
  // this is a GNU case range, which shifts the substatement slot by one.
  unsigned lhsOffset() const { return LhsOffset; }
  unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
  unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }

  /// Build a case statement assuming that the storage for the
  /// trailing objects has been properly allocated.
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
      : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    // Handle GNU case statements of the form LHS ... RHS.
    bool IsGNURange = rhs != nullptr;
    SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
    setLHS(lhs);
    setSubStmt(nullptr);
    if (IsGNURange) {
      setRHS(rhs);
      setEllipsisLoc(ellipsisLoc);
    }
  }

  /// Build an empty switch case statement.
  explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
      : SwitchCase(CaseStmtClass, Empty) {
    SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
  }

public:
  /// Build a case statement.
  static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
                          SourceLocation caseLoc, SourceLocation ellipsisLoc,
                          SourceLocation colonLoc);

  /// Build an empty case statement.
  static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);

  /// True if this case statement is of the form case LHS ... RHS, which
  /// is a GNU extension. In this case the RHS can be obtained with getRHS()
  /// and the location of the ellipsis can be obtained with getEllipsisLoc().
  bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }

  SourceLocation getCaseLoc() const { return getKeywordLoc(); }
  void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }

  /// Get the location of the ...
  /// in a case statement of the form LHS ... RHS.
  SourceLocation getEllipsisLoc() const {
    return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
                                : SourceLocation();
  }

  /// Set the location of the ... in a case statement of the form LHS ... RHS.
  /// Assert that this case statement is of this form.
  void setEllipsisLoc(SourceLocation L) {
    assert(
        caseStmtIsGNURange() &&
        "setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
    *getTrailingObjects<SourceLocation>() = L;
  }

  Expr *getLHS() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  const Expr *getLHS() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  void setLHS(Expr *Val) {
    getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  Expr *getRHS() {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  const Expr *getRHS() const {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  void setRHS(Expr *Val) {
    assert(caseStmtIsGNURange() &&
           "setRHS but this is not a case stmt of the form LHS ... RHS!");
    getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
  const Stmt *getSubStmt() const {
    return getTrailingObjects<Stmt *>()[subStmtOffset()];
  }

  void setSubStmt(Stmt *S) {
    getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // Handle deeply nested case statements with iteration instead of recursion.
const CaseStmt *CS = this;
    // Walk down chained "case a: case b: ..." statements to the innermost
    // CaseStmt, then take the end of its substatement.
    while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
      CS = CS2;

    return CS->getSubStmt()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};

class DefaultStmt : public SwitchCase {
  Stmt *SubStmt;

public:
  DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
      : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}

  /// Build an empty default statement.
  explicit DefaultStmt(EmptyShell Empty)
      : SwitchCase(DefaultStmtClass, Empty) {}

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
  void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DefaultStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }
};

// These two SwitchCase members are defined out-of-line here because they
// need CaseStmt and DefaultStmt to be complete types for dyn_cast.
SourceLocation SwitchCase::getEndLoc() const {
  if (const auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getEndLoc();
  else if (const auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getEndLoc();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}

Stmt *SwitchCase::getSubStmt() {
  if (auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getSubStmt();
  else if (auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getSubStmt();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}

/// Represents a statement that could possibly have a value and type. This
/// covers expression-statements, as well as labels and attributed statements.
///
/// Value statements have a special meaning when they are the last non-null
/// statement in a GNU statement expression, where they determine the value
/// of the statement expression.
class ValueStmt : public Stmt {
protected:
  using Stmt::Stmt;

public:
  const Expr *getExprStmt() const;
  Expr *getExprStmt() {
    const ValueStmt *ConstThis = this;
    return const_cast<Expr*>(ConstThis->getExprStmt());
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() >= firstValueStmtConstant &&
           T->getStmtClass() <= lastValueStmtConstant;
  }
};

/// LabelStmt - Represents a label, which has a substatement.  For example:
///    foo: return;
class LabelStmt : public ValueStmt {
  LabelDecl *TheDecl;
  Stmt *SubStmt;

public:
  /// Build a label statement.
  // The identifier location is stored in LabelStmtBits (bit-packed in the
  // Stmt base), not as a member here.
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
    setIdentLoc(IL);
  }

  /// Build an empty label statement.
explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {}

  SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
  void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }

  LabelDecl *getDecl() const { return TheDecl; }
  void setDecl(LabelDecl *D) { TheDecl = D; }

  const char *getName() const;
  Stmt *getSubStmt() { return SubStmt; }

  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *SS) { SubStmt = SS; }

  SourceLocation getBeginLoc() const { return getIdentLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == LabelStmtClass;
  }
};

/// Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
///   [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
    : public ValueStmt,
      private llvm::TrailingObjects<AttributedStmt, const Attr *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  Stmt *SubStmt;

  // NumAttrs and AttrLoc are stored in AttributedStmtBits; the Attr*
  // array itself is stored as trailing objects after this node.
  AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
                 Stmt *SubStmt)
      : ValueStmt(AttributedStmtClass), SubStmt(SubStmt) {
    AttributedStmtBits.NumAttrs = Attrs.size();
    AttributedStmtBits.AttrLoc = Loc;
    std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
  }

  explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
      : ValueStmt(AttributedStmtClass, Empty) {
    AttributedStmtBits.NumAttrs = NumAttrs;
    AttributedStmtBits.AttrLoc = SourceLocation{};
    std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
  }

  const Attr *const *getAttrArrayPtr() const {
    return getTrailingObjects<const Attr *>();
  }
  const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }

public:
  static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
                                ArrayRef<const Attr *> Attrs, Stmt *SubStmt);

  // Build an empty attributed statement.
  static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);

  SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
  ArrayRef<const Attr *> getAttrs() const {
    return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
  }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }

  SourceLocation getBeginLoc() const { return getAttrLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == AttributedStmtClass;
  }
};

/// IfStmt - This represents an if/then/else.
class IfStmt final
    : public Stmt,
      private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // IfStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing
  // objects at the end but this would change the order of the children.
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the then statement.
  //   Always present.
  //
  // * A "Stmt *" for the else statement.
  //   Present if and only if hasElseStorage().
  //
  // * A "SourceLocation" for the location of the "else".
  //   Present if and only if hasElseStorage().
  enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
           hasInitStorage();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return hasElseStorage();
  }

  // Offset helpers: optional slots (init, var) shift the positions of
  // the later slots in the trailing "Stmt *" array.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }

  unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
  unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }

  /// Build an if/then/else statement.
  IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init,
         VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL, Stmt *Else);

  /// Build an empty if/then/else statement.
  explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);

public:
  /// Create an IfStmt.
  static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
                        bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
                        Stmt *Then, SourceLocation EL = SourceLocation(),
                        Stmt *Else = nullptr);

  /// Create an empty IfStmt optionally with storage for an else statement,
  /// condition variable and init expression.
  static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
                             bool HasInit);

  /// True if this IfStmt has the storage for an init statement.
  bool hasInitStorage() const { return IfStmtBits.HasInit; }

  /// True if this IfStmt has storage for a variable declaration.
  bool hasVarStorage() const { return IfStmtBits.HasVar; }

  /// True if this IfStmt has storage for an else statement.
  bool hasElseStorage() const { return IfStmtBits.HasElse; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }
  const Stmt *getThen() const {
    return getTrailingObjects<Stmt *>()[thenOffset()];
  }

  void setThen(Stmt *Then) {
    getTrailingObjects<Stmt *>()[thenOffset()] = Then;
  }

  Stmt *getElse() {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  const Stmt *getElse() const {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  void setElse(Stmt *Else) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    getTrailingObjects<Stmt *>()[elseOffset()] = Else;
  }

  /// Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<IfStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable for this if statement.
  /// The if statement must have storage for the condition variable.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This if statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
  void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }

  SourceLocation getElseLoc() const {
    return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
                            : SourceLocation();
  }

  void setElseLoc(SourceLocation ElseLoc) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    *getTrailingObjects<SourceLocation>() = ElseLoc;
  }

  bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
  void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }

  /// If this is an 'if constexpr', determine which substatement will be taken.
  /// Otherwise, or if the condition is value-dependent, returns None.
  Optional<const Stmt*> getNondiscardedCase(const ASTContext &Ctx) const;

  bool isObjCAvailabilityCheck() const;

  SourceLocation getBeginLoc() const { return getIfLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    if (getElse())
      return getElse()->getEndLoc();
    return getThen()->getEndLoc();
  }

  // Iterators over subexpressions.  The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};

/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
                         private llvm::TrailingObjects<SwitchStmt, Stmt *> {
  friend TrailingObjects;

  /// Points to a linked list of case and default statements.
  SwitchCase *FirstCase;

  // SwitchStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more convenient to
  // put the optional trailing objects at the end but this would change
  // the order in children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
enum { InitOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
  }

  // Offset helpers: optional slots (init, var) shift the positions of the
  // later slots in the trailing "Stmt *" array.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }

  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  /// Build a switch statement.
  SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond);

  /// Build a empty switch statement.
  explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);

public:
  /// Create a switch statement.
  static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
                            Expr *Cond);

  /// Create an empty switch statement optionally with storage for
  /// an init expression and a condition variable.
  static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
                                 bool HasVar);

  /// True if this SwitchStmt has storage for an init statement.
  bool hasInitStorage() const { return SwitchStmtBits.HasInit; }

  /// True if this SwitchStmt has storage for a condition variable.
  bool hasVarStorage() const { return SwitchStmtBits.HasVar; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This switch statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  /// Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  /// case 0: break;
  /// // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<SwitchStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable in this switch statement.
  /// The switch statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SwitchCase *getSwitchCaseList() { return FirstCase; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase; }
  void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }

  SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }

  void setBody(Stmt *S, SourceLocation SL) {
    setBody(S);
    setSwitchLoc(SL);
  }

  // Prepends SC to the intrusive singly-linked case list; cases therefore
  // appear in this list in reverse order of addition.
  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase() &&
           "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase);
    FirstCase = SC;
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const {
    return SwitchStmtBits.AllEnumCasesCovered;
  }

  SourceLocation getBeginLoc() const { return getSwitchLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody() ? getBody()->getEndLoc()
                     : reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};

/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
                        private llvm::TrailingObjects<WhileStmt, Stmt *> {
  friend TrailingObjects;

  // WhileStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more
  // convenient to put the optional trailing object at the end
  // but this would affect children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  //
  enum { VarOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  // Offset helpers: the optional condition-variable slot shifts the later
  // slots in the trailing "Stmt *" array.
  unsigned varOffset() const { return VarOffset; }
  unsigned condOffset() const { return VarOffset + hasVarStorage(); }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasVarStorage();
  }

  /// Build a while statement.
  WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
            SourceLocation WL);

  /// Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty, bool HasVar);

public:
  /// Create a while statement.
  static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
                           Stmt *Body, SourceLocation WL);

  /// Create an empty while statement optionally with storage for
  /// a condition variable.
  static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);

  /// True if this WhileStmt has storage for a condition variable.
  bool hasVarStorage() const { return WhileStmtBits.HasVar; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  /// Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<WhileStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable of this while statement.
  /// The while statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ?
static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } const DeclStmt *getConditionVariableDeclStmt() const { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; } void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; } SourceLocation getBeginLoc() const { return getWhileLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return getBody()->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == WhileStmtClass; } // Iterators child_range children() { return child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } const_child_range children() const { return const_child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } }; /// DoStmt - This represents a 'do/while' stmt. class DoStmt : public Stmt { enum { BODY, COND, END_EXPR }; Stmt *SubExprs[END_EXPR]; SourceLocation WhileLoc; SourceLocation RParenLoc; // Location of final ')' in do stmt condition. public: DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL, SourceLocation RP) : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) { setCond(Cond); setBody(Body); setDoLoc(DL); } /// Build an empty do-while statement. 
explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {} Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); } const Expr *getCond() const { return reinterpret_cast<Expr *>(SubExprs[COND]); } void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getBody() const { return SubExprs[BODY]; } void setBody(Stmt *Body) { SubExprs[BODY] = Body; } SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; } void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; } SourceLocation getWhileLoc() const { return WhileLoc; } void setWhileLoc(SourceLocation L) { WhileLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getBeginLoc() const { return getDoLoc(); } SourceLocation getEndLoc() const { return getRParenLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == DoStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR); } const_child_range children() const { return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR); } }; /// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of /// the init/cond/inc parts of the ForStmt will be null if they were not /// specified in the source. class ForStmt : public Stmt { enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt. SourceLocation LParenLoc, RParenLoc; public: ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar, Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP, SourceLocation RP); /// Build an empty for statement. explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {} Stmt *getInit() { return SubExprs[INIT]; } /// Retrieve the variable declared in this "for" statement, if any. /// /// In the following example, "y" is the condition variable. 
/// \code /// for (int x = random(); int y = mangle(x); ++x) { /// // ... /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this ForStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getInit() const { return SubExprs[INIT]; } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); } const Stmt *getBody() const { return SubExprs[BODY]; } void setInit(Stmt *S) { SubExprs[INIT] = S; } void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getForLoc() const { return ForStmtBits.ForLoc; } void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; } SourceLocation getLParenLoc() const { return LParenLoc; } void setLParenLoc(SourceLocation L) { LParenLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getBeginLoc() const { return getForLoc(); } SourceLocation getEndLoc() const { return getBody()->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ForStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } const_child_range children() const { return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR); } }; /// GotoStmt - This represents a direct goto. 
class GotoStmt : public Stmt {
  /// The label being jumped to.
  LabelDecl *Label;

  /// The source location of the label name in the goto.
  SourceLocation LabelLoc;

public:
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
      : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) {
    // The 'goto' keyword location lives in the Stmt bit-fields, not a member.
    setGotoLoc(GL);
  }

  /// Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}

  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  // A direct goto's source range ends at the label name.
  SourceLocation getEndLoc() const { return getLabelLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // Iterators: a direct goto has no sub-statements, so the range is empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
  /// Location of the '*' in the indirect goto.
  SourceLocation StarLoc;

  /// The target address expression. Stored as a Stmt* so children() can hand
  /// out a pointer into it; it is really an Expr* (see getTarget()).
  Stmt *Target;

public:
  IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target)
      : Stmt(IndirectGotoStmtClass), StarLoc(starLoc) {
    setTarget(target);
    setGotoLoc(gotoLoc);
  }

  /// Build an empty indirect goto statement.
  explicit IndirectGotoStmt(EmptyShell Empty)
      : Stmt(IndirectGotoStmtClass, Empty) {}

  // The 'goto' keyword location is stored in the shared GotoStmtBits,
  // just as for GotoStmt.
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setStarLoc(SourceLocation L) { StarLoc = L; }
  SourceLocation getStarLoc() const { return StarLoc; }

  Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
  const Expr *getTarget() const {
    return reinterpret_cast<const Expr *>(Target);
  }
  void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }

  /// getConstantTarget - Returns the fixed target of this indirect
  /// goto, if one exists.
  LabelDecl *getConstantTarget();
  const LabelDecl *getConstantTarget() const {
    return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
  }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IndirectGotoStmtClass;
  }

  // Iterators: the only child is the target expression.
  child_range children() { return child_range(&Target, &Target + 1); }
  const_child_range children() const {
    return const_child_range(&Target, &Target + 1);
  }
};

/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
    setContinueLoc(CL);
  }

  /// Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}

  // The keyword location is kept in the Stmt bit-fields; the 'continue'
  // keyword is the statement's entire source extent.
  SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; }
  void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }

  SourceLocation getBeginLoc() const { return getContinueLoc(); }
  SourceLocation getEndLoc() const { return getContinueLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // Iterators: a continue statement has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) {
    setBreakLoc(BL);
  }

  /// Build an empty break statement.
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}

  // As with ContinueStmt, the keyword location doubles as both source bounds.
  SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }
  void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }

  SourceLocation getBeginLoc() const { return getBreakLoc(); }
  SourceLocation getEndLoc() const { return getBreakLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }

  // Iterators: a break statement has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// ReturnStmt - This represents a return, optionally of an expression:
///   return;
///   return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void. We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
class ReturnStmt final : public Stmt, private llvm::TrailingObjects<ReturnStmt, const VarDecl *> { friend TrailingObjects; /// The return expression. Stmt *RetExpr; // ReturnStmt is followed optionally by a trailing "const VarDecl *" // for the NRVO candidate. Present if and only if hasNRVOCandidate(). /// True if this ReturnStmt has storage for an NRVO candidate. bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; } unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const { return hasNRVOCandidate(); } /// Build a return statement. ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate); /// Build an empty return statement. explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate); public: /// Create a return statement. static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate); /// Create an empty return statement, optionally with /// storage for an NRVO candidate. static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate); Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); } const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); } void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); } /// Retrieve the variable that might be used for the named return /// value optimization. /// /// The optimization itself can only be performed if the variable is /// also marked as an NRVO object. const VarDecl *getNRVOCandidate() const { return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>() : nullptr; } /// Set the variable that might be used for the named return value /// optimization. The return statement must have storage for it, /// which is the case if and only if hasNRVOCandidate() is true. 
void setNRVOCandidate(const VarDecl *Var) { assert(hasNRVOCandidate() && "This return statement has no storage for an NRVO candidate!"); *getTrailingObjects<const VarDecl *>() = Var; } SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; } void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; } SourceLocation getBeginLoc() const { return getReturnLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return RetExpr ? RetExpr->getEndLoc() : getReturnLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ReturnStmtClass; } // Iterators child_range children() { if (RetExpr) return child_range(&RetExpr, &RetExpr + 1); return child_range(child_iterator(), child_iterator()); } const_child_range children() const { if (RetExpr) return const_child_range(&RetExpr, &RetExpr + 1); return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt. class AsmStmt : public Stmt { protected: friend class ASTStmtReader; SourceLocation AsmLoc; /// True if the assembly statement does not have any input or output /// operands. bool IsSimple; /// If true, treat this inline assembly as having side effects. /// This assembly statement should not be optimized, deleted or moved. bool IsVolatile; unsigned NumOutputs; unsigned NumInputs; unsigned NumClobbers; Stmt **Exprs = nullptr; AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, unsigned numclobbers) : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile), NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) {} public: /// Build an empty inline-assembly statement. 
explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {} SourceLocation getAsmLoc() const { return AsmLoc; } void setAsmLoc(SourceLocation L) { AsmLoc = L; } bool isSimple() const { return IsSimple; } void setSimple(bool V) { IsSimple = V; } bool isVolatile() const { return IsVolatile; } void setVolatile(bool V) { IsVolatile = V; } SourceLocation getBeginLoc() const LLVM_READONLY { return {}; } SourceLocation getEndLoc() const LLVM_READONLY { return {}; } //===--- Asm String Analysis ---===// /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// unsigned getNumOutputs() const { return NumOutputs; } /// getOutputConstraint - Return the constraint string for the specified /// output operand. All output constraints are known to be non-empty (either /// '=' or '+'). StringRef getOutputConstraint(unsigned i) const; /// isOutputPlusConstraint - Return true if the specified output constraint /// is a "+" constraint (which is both an input and an output) or false if it /// is an "=" constraint (just an output). bool isOutputPlusConstraint(unsigned i) const { return getOutputConstraint(i)[0] == '+'; } const Expr *getOutputExpr(unsigned i) const; /// getNumPlusOperands - Return the number of output operands that have a "+" /// constraint. unsigned getNumPlusOperands() const; //===--- Input operands ---===// unsigned getNumInputs() const { return NumInputs; } /// getInputConstraint - Return the specified input constraint. Unlike output /// constraints, these can be empty. StringRef getInputConstraint(unsigned i) const; const Expr *getInputExpr(unsigned i) const; //===--- Other ---===// unsigned getNumClobbers() const { return NumClobbers; } StringRef getClobber(unsigned i) const; static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass || T->getStmtClass() == MSAsmStmtClass; } // Input expr iterators. 
using inputs_iterator = ExprIterator; using const_inputs_iterator = ConstExprIterator; using inputs_range = llvm::iterator_range<inputs_iterator>; using inputs_const_range = llvm::iterator_range<const_inputs_iterator>; inputs_iterator begin_inputs() { return &Exprs[0] + NumOutputs; } inputs_iterator end_inputs() { return &Exprs[0] + NumOutputs + NumInputs; } inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); } const_inputs_iterator begin_inputs() const { return &Exprs[0] + NumOutputs; } const_inputs_iterator end_inputs() const { return &Exprs[0] + NumOutputs + NumInputs; } inputs_const_range inputs() const { return inputs_const_range(begin_inputs(), end_inputs()); } // Output expr iterators. using outputs_iterator = ExprIterator; using const_outputs_iterator = ConstExprIterator; using outputs_range = llvm::iterator_range<outputs_iterator>; using outputs_const_range = llvm::iterator_range<const_outputs_iterator>; outputs_iterator begin_outputs() { return &Exprs[0]; } outputs_iterator end_outputs() { return &Exprs[0] + NumOutputs; } outputs_range outputs() { return outputs_range(begin_outputs(), end_outputs()); } const_outputs_iterator begin_outputs() const { return &Exprs[0]; } const_outputs_iterator end_outputs() const { return &Exprs[0] + NumOutputs; } outputs_const_range outputs() const { return outputs_const_range(begin_outputs(), end_outputs()); } child_range children() { return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs); } const_child_range children() const { return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs); } }; /// This represents a GCC inline-assembly statement extension. class GCCAsmStmt : public AsmStmt { friend class ASTStmtReader; SourceLocation RParenLoc; StringLiteral *AsmStr; // FIXME: If we wanted to, we could allocate all of these in one big array. 
StringLiteral **Constraints = nullptr; StringLiteral **Clobbers = nullptr; IdentifierInfo **Names = nullptr; unsigned NumLabels = 0; public: GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, IdentifierInfo **names, StringLiteral **constraints, Expr **exprs, StringLiteral *asmstr, unsigned numclobbers, StringLiteral **clobbers, unsigned numlabels, SourceLocation rparenloc); /// Build an empty inline-assembly statement. explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {} SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } //===--- Asm String Analysis ---===// const StringLiteral *getAsmString() const { return AsmStr; } StringLiteral *getAsmString() { return AsmStr; } void setAsmString(StringLiteral *E) { AsmStr = E; } /// AsmStringPiece - this is part of a decomposed asm string specification /// (for use with the AnalyzeAsmString function below). An asm string is /// considered to be a concatenation of these parts. class AsmStringPiece { public: enum Kind { String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%". Operand // Operand reference, with optional modifier %c4. }; private: Kind MyKind; std::string Str; unsigned OperandNo; // Source range for operand references. 
CharSourceRange Range; public: AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {} AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin, SourceLocation End) : MyKind(Operand), Str(S), OperandNo(OpNo), Range(CharSourceRange::getCharRange(Begin, End)) {} bool isString() const { return MyKind == String; } bool isOperand() const { return MyKind == Operand; } const std::string &getString() const { return Str; } unsigned getOperandNo() const { assert(isOperand()); return OperandNo; } CharSourceRange getRange() const { assert(isOperand() && "Range is currently used only for Operands."); return Range; } /// getModifier - Get the modifier for this operand, if present. This /// returns '\0' if there was no modifier. char getModifier() const; }; /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing /// it into pieces. If the asm string is erroneous, emit errors and return /// true, otherwise return false. This handles canonicalization and /// translation of strings from GCC syntax to LLVM IR syntax, and handles //// flattening of named references like %[foo] to Operand AsmStringPiece's. unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces, const ASTContext &C, unsigned &DiagOffs) const; /// Assemble final IR asm string. 
std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; } StringRef getOutputName(unsigned i) const { if (IdentifierInfo *II = getOutputIdentifier(i)) return II->getName(); return {}; } StringRef getOutputConstraint(unsigned i) const; const StringLiteral *getOutputConstraintLiteral(unsigned i) const { return Constraints[i]; } StringLiteral *getOutputConstraintLiteral(unsigned i) { return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// IdentifierInfo *getInputIdentifier(unsigned i) const { return Names[i + NumOutputs]; } StringRef getInputName(unsigned i) const { if (IdentifierInfo *II = getInputIdentifier(i)) return II->getName(); return {}; } StringRef getInputConstraint(unsigned i) const; const StringLiteral *getInputConstraintLiteral(unsigned i) const { return Constraints[i + NumOutputs]; } StringLiteral *getInputConstraintLiteral(unsigned i) { return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getInputExpr(i); } //===--- Labels ---===// bool isAsmGoto() const { return NumLabels > 0; } unsigned getNumLabels() const { return NumLabels; } IdentifierInfo *getLabelIdentifier(unsigned i) const { return Names[i + NumInputs]; } AddrLabelExpr *getLabelExpr(unsigned i) const; StringRef getLabelName(unsigned i) const; using labels_iterator = CastIterator<AddrLabelExpr>; using const_labels_iterator = ConstCastIterator<AddrLabelExpr>; using labels_range = llvm::iterator_range<labels_iterator>; using labels_const_range = llvm::iterator_range<const_labels_iterator>; labels_iterator begin_labels() { return &Exprs[0] + NumInputs; } labels_iterator end_labels() { return &Exprs[0] + NumInputs + 
NumLabels; } labels_range labels() { return labels_range(begin_labels(), end_labels()); } const_labels_iterator begin_labels() const { return &Exprs[0] + NumInputs; } const_labels_iterator end_labels() const { return &Exprs[0] + NumInputs + NumLabels; } labels_const_range labels() const { return labels_const_range(begin_labels(), end_labels()); } private: void setOutputsAndInputsAndClobbers(const ASTContext &C, IdentifierInfo **Names, StringLiteral **Constraints, Stmt **Exprs, unsigned NumOutputs, unsigned NumInputs, unsigned NumLabels, StringLiteral **Clobbers, unsigned NumClobbers); public: //===--- Other ---===// /// getNamedOperand - Given a symbolic operand reference like %[foo], /// translate this into a numeric value needed to reference the same operand. /// This returns -1 if the operand name is invalid. int getNamedOperand(StringRef SymbolicName) const; StringRef getClobber(unsigned i) const; StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; } const StringLiteral *getClobberStringLiteral(unsigned i) const { return Clobbers[i]; } SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; } SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass; } }; /// This represents a Microsoft inline-assembly statement extension. class MSAsmStmt : public AsmStmt { friend class ASTStmtReader; SourceLocation LBraceLoc, EndLoc; StringRef AsmStr; unsigned NumAsmToks = 0; Token *AsmToks = nullptr; StringRef *Constraints = nullptr; StringRef *Clobbers = nullptr; public: MSAsmStmt(const ASTContext &C, SourceLocation asmloc, SourceLocation lbraceloc, bool issimple, bool isvolatile, ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs, ArrayRef<StringRef> constraints, ArrayRef<Expr*> exprs, StringRef asmstr, ArrayRef<StringRef> clobbers, SourceLocation endloc); /// Build an empty MS-style inline-assembly statement. 
explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {} SourceLocation getLBraceLoc() const { return LBraceLoc; } void setLBraceLoc(SourceLocation L) { LBraceLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } bool hasBraces() const { return LBraceLoc.isValid(); } unsigned getNumAsmToks() { return NumAsmToks; } Token *getAsmToks() { return AsmToks; } //===--- Asm String Analysis ---===// StringRef getAsmString() const { return AsmStr; } /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// StringRef getOutputConstraint(unsigned i) const { assert(i < NumOutputs); return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// StringRef getInputConstraint(unsigned i) const { assert(i < NumInputs); return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getInputExpr(i); } //===--- Other ---===// ArrayRef<StringRef> getAllConstraints() const { return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs); } ArrayRef<StringRef> getClobbers() const { return llvm::makeArrayRef(Clobbers, NumClobbers); } ArrayRef<Expr*> getAllExprs() const { return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs), NumInputs + NumOutputs); } StringRef getClobber(unsigned i) const { return getClobbers()[i]; } private: void initialize(const ASTContext &C, StringRef AsmString, ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints, ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers); public: SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == MSAsmStmtClass; } child_range children() { return 
child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]); } const_child_range children() const { return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]); } }; class SEHExceptStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; SourceLocation Loc; Stmt *Children[2]; enum { FILTER_EXPR, BLOCK }; SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {} public: static SEHExceptStmt* Create(const ASTContext &C, SourceLocation ExceptLoc, Expr *FilterExpr, Stmt *Block); SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); } SourceLocation getExceptLoc() const { return Loc; } SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); } Expr *getFilterExpr() const { return reinterpret_cast<Expr*>(Children[FILTER_EXPR]); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Children[BLOCK]); } child_range children() { return child_range(Children, Children+2); } const_child_range children() const { return const_child_range(Children, Children + 2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHExceptStmtClass; } }; class SEHFinallyStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; SourceLocation Loc; Stmt *Block; SEHFinallyStmt(SourceLocation Loc, Stmt *Block); explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {} public: static SEHFinallyStmt* Create(const ASTContext &C, SourceLocation FinallyLoc, Stmt *Block); SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); } SourceLocation getFinallyLoc() const { return Loc; } SourceLocation getEndLoc() const { return Block->getEndLoc(); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); } child_range children() { return child_range(&Block,&Block+1); } const_child_range children() const { return const_child_range(&Block, &Block + 1); } static bool classof(const Stmt *T) { return 
T->getStmtClass() == SEHFinallyStmtClass; } }; class SEHTryStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; bool IsCXXTry; SourceLocation TryLoc; Stmt *Children[2]; enum { TRY = 0, HANDLER = 1 }; SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try' SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {} public: static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry, SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); } SourceLocation getTryLoc() const { return TryLoc; } SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); } bool getIsCXXTry() const { return IsCXXTry; } CompoundStmt* getTryBlock() const { return cast<CompoundStmt>(Children[TRY]); } Stmt *getHandler() const { return Children[HANDLER]; } /// Returns 0 if not defined SEHExceptStmt *getExceptHandler() const; SEHFinallyStmt *getFinallyHandler() const; child_range children() { return child_range(Children, Children+2); } const_child_range children() const { return const_child_range(Children, Children + 2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHTryStmtClass; } }; /// Represents a __leave statement. class SEHLeaveStmt : public Stmt { SourceLocation LeaveLoc; public: explicit SEHLeaveStmt(SourceLocation LL) : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {} /// Build an empty __leave statement. 
explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {} SourceLocation getLeaveLoc() const { return LeaveLoc; } void setLeaveLoc(SourceLocation L) { LeaveLoc = L; } SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; } SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHLeaveStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// This captures a statement into a function. For example, the following /// pragma annotated compound statement can be represented as a CapturedStmt, /// and this compound statement is the body of an anonymous outlined function. /// @code /// #pragma omp parallel /// { /// compute(); /// } /// @endcode class CapturedStmt : public Stmt { public: /// The different capture forms: by 'this', by reference, capture for /// variable-length array type etc. enum VariableCaptureKind { VCK_This, VCK_ByRef, VCK_ByCopy, VCK_VLAType, }; /// Describes the capture of either a variable, or 'this', or /// variable-length array type. class Capture { llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind; SourceLocation Loc; public: friend class ASTStmtReader; /// Create a new capture. /// /// \param Loc The source location associated with this capture. /// /// \param Kind The kind of capture (this, ByRef, ...). /// /// \param Var The variable being captured, or null if capturing this. Capture(SourceLocation Loc, VariableCaptureKind Kind, VarDecl *Var = nullptr); /// Determine the kind of capture. VariableCaptureKind getCaptureKind() const; /// Retrieve the source location at which the variable or 'this' was /// first used. SourceLocation getLocation() const { return Loc; } /// Determine whether this capture handles the C++ 'this' pointer. 
bool capturesThis() const { return getCaptureKind() == VCK_This; } /// Determine whether this capture handles a variable (by reference). bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; } /// Determine whether this capture handles a variable by copy. bool capturesVariableByCopy() const { return getCaptureKind() == VCK_ByCopy; } /// Determine whether this capture handles a variable-length array /// type. bool capturesVariableArrayType() const { return getCaptureKind() == VCK_VLAType; } /// Retrieve the declaration of the variable being captured. /// /// This operation is only valid if this capture captures a variable. VarDecl *getCapturedVar() const; }; private: /// The number of variable captured, including 'this'. unsigned NumCaptures; /// The pointer part is the implicit the outlined function and the /// int part is the captured region kind, 'CR_Default' etc. llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind; /// The record for captured variables, a RecordDecl or CXXRecordDecl. RecordDecl *TheRecordDecl = nullptr; /// Construct a captured statement. CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); /// Construct an empty captured statement. CapturedStmt(EmptyShell Empty, unsigned NumCaptures); Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); } Stmt *const *getStoredStmts() const { return reinterpret_cast<Stmt *const *>(this + 1); } Capture *getStoredCaptures() const; void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; } public: friend class ASTStmtReader; static CapturedStmt *Create(const ASTContext &Context, Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); static CapturedStmt *CreateDeserialized(const ASTContext &Context, unsigned NumCaptures); /// Retrieve the statement being captured. 
Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; } const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; } /// Retrieve the outlined function declaration. CapturedDecl *getCapturedDecl(); const CapturedDecl *getCapturedDecl() const; /// Set the outlined function declaration. void setCapturedDecl(CapturedDecl *D); /// Retrieve the captured region kind. CapturedRegionKind getCapturedRegionKind() const; /// Set the captured region kind. void setCapturedRegionKind(CapturedRegionKind Kind); /// Retrieve the record declaration for captured variables. const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; } /// Set the record declaration for captured variables. void setCapturedRecordDecl(RecordDecl *D) { assert(D && "null RecordDecl"); TheRecordDecl = D; } /// True if this variable has been captured. bool capturesVariable(const VarDecl *Var) const; /// An iterator that walks over the captures. using capture_iterator = Capture *; using const_capture_iterator = const Capture *; using capture_range = llvm::iterator_range<capture_iterator>; using capture_const_range = llvm::iterator_range<const_capture_iterator>; capture_range captures() { return capture_range(capture_begin(), capture_end()); } capture_const_range captures() const { return capture_const_range(capture_begin(), capture_end()); } /// Retrieve an iterator pointing to the first capture. capture_iterator capture_begin() { return getStoredCaptures(); } const_capture_iterator capture_begin() const { return getStoredCaptures(); } /// Retrieve an iterator pointing past the end of the sequence of /// captures. capture_iterator capture_end() const { return getStoredCaptures() + NumCaptures; } /// Retrieve the number of captures, including 'this'. unsigned capture_size() const { return NumCaptures; } /// Iterator that walks over the capture initialization arguments. 
using capture_init_iterator = Expr **; using capture_init_range = llvm::iterator_range<capture_init_iterator>; /// Const iterator that walks over the capture initialization /// arguments. using const_capture_init_iterator = Expr *const *; using const_capture_init_range = llvm::iterator_range<const_capture_init_iterator>; capture_init_range capture_inits() { return capture_init_range(capture_init_begin(), capture_init_end()); } const_capture_init_range capture_inits() const { return const_capture_init_range(capture_init_begin(), capture_init_end()); } /// Retrieve the first initialization argument. capture_init_iterator capture_init_begin() { return reinterpret_cast<Expr **>(getStoredStmts()); } const_capture_init_iterator capture_init_begin() const { return reinterpret_cast<Expr *const *>(getStoredStmts()); } /// Retrieve the iterator pointing one past the last initialization /// argument. capture_init_iterator capture_init_end() { return capture_init_begin() + NumCaptures; } const_capture_init_iterator capture_init_end() const { return capture_init_begin() + NumCaptures; } SourceLocation getBeginLoc() const LLVM_READONLY { return getCapturedStmt()->getBeginLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return getCapturedStmt()->getEndLoc(); } SourceRange getSourceRange() const LLVM_READONLY { return getCapturedStmt()->getSourceRange(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CapturedStmtClass; } child_range children(); const_child_range children() const; }; } // namespace clang #endif // LLVM_CLANG_AST_STMT_H
pr83977-3.c
/* PR middle-end/83977 */
/* { dg-do compile } */

void bar (void);
/* 'used' forces the function to be emitted even if it appears unreferenced.  */
int foo (int, int) __attribute__((used));

#pragma omp declare simd uniform (b) linear(a:b)
int
foo (int a, int b)
{
  a = a + 1;
  /* This function can't be called from simd loops,
     because it violates declare simd restrictions.
     We shouldn't ICE on it though, nor attempt to
     generate simd clones for the *omp_fn* functions.  */
  #pragma omp parallel
  bar ();
  return a;
}

/* NOTE(review): re-declaration with a conflicting 'unused' attribute — part
   of the original ICE reproducer; presumably intentional, confirm against
   PR middle-end/83977 before touching.  */
int foo (int, int) __attribute__((unused));
pr25996.c
/* PR c/25996 */
/* Regression tests: an undeclared identifier in each position of an
   OpenMP for-loop header must produce a diagnostic, not a crash.  */

/* The loop variable 'i' itself is undeclared.  */
void
test1 (void)
{
#pragma omp for
  for (i = 0; i < 1; ++i);	/* { dg-error "undeclared|for each function" } */
}

/* Undeclared 'j' in the init expression.  */
void
test2 (void)
{
  int i;
#pragma omp for
  for (i = j; i < 1; ++i);	/* { dg-error "undeclared" } */
}

/* Undeclared 'j' in the controlling predicate.  */
void
test3 (void)
{
  int i;
#pragma omp for
  for (i = 0; i < j; ++i);	/* { dg-error "undeclared|invalid controlling predicate" } */
}

/* Undeclared 'j' in the increment expression.  */
void
test4 (void)
{
  int i;
#pragma omp for
  for (i = 0; i < 10; i += j);	/* { dg-error "undeclared|invalid increment expression" } */
}
omp_nested.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include "omp_testsuite.h" /* * Test if the compiler supports nested parallelism * By Chunhua Liao, University of Houston * Oct. 2005 */ int test_omp_nested() { #ifdef _OPENMP if (omp_get_max_threads() > 4) omp_set_num_threads(4); if (omp_get_max_threads() < 2) omp_set_num_threads(2); #endif int counter = 0; #ifdef _OPENMP omp_set_nested(1); #endif #pragma omp parallel shared(counter) { #pragma omp critical counter++; #pragma omp parallel { #pragma omp critical counter--; } } return (counter != 0); } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_nested()) { num_failed++; } } return num_failed; }
mlp_mpi_ovlp_example_f32.c
/****************************************************************************** * Copyright (c) Intel Corporation - All rights reserved. * * This file is part of the LIBXSMM library. * * * * For information on the license, see the LICENSE file. * * Further information: https://github.com/hfp/libxsmm/ * * SPDX-License-Identifier: BSD-3-Clause * ******************************************************************************/ /* Evangelos Georganas (Intel Corp.) ******************************************************************************/ #include <libxsmm_dnn.h> #include <dnn_common.h> #include <mpi.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <math.h> #if defined(_OPENMP) # include <omp.h> #endif #define DETAILED_PROFILE #define N_PROF_THREADS 128 LIBXSMM_INLINE void my_init_buf_mlp(float* buf, size_t size, int initPos, int initOne) { int i; zero_buf(buf, size); for (i = 0; i < (int)size; ++i) { buf[i] = (float)((initOne != 0) ? 1.0 : ((initPos != 0) ? libxsmm_rng_f64() : (0.05 - libxsmm_rng_f64()/10.0))); } } int main(int argc, char* argv[]) { /* Initialize the MPI environment */ int provided; MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided); if(provided < MPI_THREAD_MULTIPLE) { printf("The threading support level is lesser than that demanded.\n"); MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); } float **act_libxsmm, **ref_act_libxsmm, **fil_libxsmm, **delact_libxsmm, **ref_delact_libxsmm, **delfil_libxsmm; float **bias_libxsmm, **delbias_libxsmm; unsigned char **relumask_libxsmm; int *label_libxsmm; void* scratch = NULL; size_t scratch_size = 0; libxsmm_matdiff_info norms; libxsmm_matdiff_clear(&norms); /* some parameters we can overwrite via cli, default is some inner layer of overfeat */ int n_procs = 4, n_comm_threads = 2, n_comp_threads;; int iters = 10; /* repetitions of benchmark */ int MB = 32; /* mini-batch size, "N" */ int global_MB = 32; int fuse_type = 0; /* 0: nothing fused, 1: relu fused, 2: elementwise fused, 3: relu 
and elementwise fused */ char type = 'A'; /* 'A': ALL, 'F': FP, 'B': BP */ int bn = 64; int bk = 64; int bc = 64; int *C; /* number of input feature maps, "C" */ int num_layers = 0; int idx; const char *const env_check = getenv("CHECK"); const double check = LIBXSMM_ABS(0 == env_check ? 1 : atof(env_check)); #if defined(_OPENMP) int nThreads = omp_get_max_threads(); /* number of threads */ #else int nThreads = 1; /* number of threads */ #endif unsigned long long l_start, l_end; double l_total = 0.0; double l_fwd_fc[N_PROF_THREADS]; double l_bwdupd_fc[N_PROF_THREADS]; double l_allreduce[N_PROF_THREADS]; double l_optimizer[N_PROF_THREADS]; double l_fwd_loss[N_PROF_THREADS]; double l_bwd_loss[N_PROF_THREADS]; double first_bwdupd_compute = 0.0; double gflop = 0.0; int i, j, rank; double fil_size = 0.0; double act_size = 0.0; float lr = 0.2f; float loss_weight = 0.1f; libxsmm_datatype in_dt, out_dt, comp_dt; libxsmm_dnn_fc_eltw_fuse my_fuse; libxsmm_dnn_fc_fwd_config* libxsmm_dnn_fc_fwd; libxsmm_dnn_fc_bwd_config* libxsmm_dnn_fc_bwd; libxsmm_dnn_opt_config* libxsmm_dnn_opt; libxsmm_dnn_smax_fwd_config libxsmm_dnn_smax_fwd; libxsmm_dnn_smax_bwd_config libxsmm_dnn_smax_bwd; for (i = 0; i < N_PROF_THREADS; i++) { l_fwd_fc[i] = 0.0; l_bwdupd_fc[i] = 0.0; l_allreduce[i] = 0.0; l_optimizer[i] = 0.0; l_fwd_loss[i] = 0.0; l_bwd_loss[i] = 0.0; } if (argc > 1 && !strncmp(argv[1], "-h", 3)) { printf("Usage: %s iters MB fuse_type type bn bk bc C1 C2 ... 
CN\n", argv[0]); return 0; } libxsmm_rng_set_seed(1); /* reading new values from cli */ i = 1; num_layers = argc - 10; if (argc > i) n_comm_threads = atoi(argv[i++]); if (argc > i) iters = atoi(argv[i++]); if (argc > i) global_MB = atoi(argv[i++]); if (argc > i) fuse_type = atoi(argv[i++]); if (argc > i) type = *(argv[i++]); if (argc > i) bn = atoi(argv[i++]); if (argc > i) bk = atoi(argv[i++]); if (argc > i) bc = atoi(argv[i++]); n_comp_threads = nThreads - n_comm_threads; MPI_Request request[n_comm_threads][2]; /* Get the rank of the process */ MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &n_procs); MB = global_MB / n_procs; /* Setup communicators for overlapping threads */ MPI_Comm comms[n_comm_threads]; for (idx = 0; idx < n_comm_threads; idx++) { MPI_Comm_dup(MPI_COMM_WORLD, &comms[idx]); } /* allocate the number of channles buffer */ if ( num_layers < 1 ) { printf("Usage: %s iters MB fuse_type type bn bk bc C1 C2 ... CN\n", argv[0]); return 0; } C = (int*)malloc((num_layers+2)*sizeof(int)); for (j = 0 ; i < argc; ++i, ++j ) { C[j] = atoi(argv[i]); } /* handle softmax config */ C[num_layers+1] = C[num_layers]; if (type != 'A' && type != 'F' && type != 'B') { printf("type needs to be 'A' (All), 'F' (FP only), 'B' (BP only)\n"); return -1; } if ( (fuse_type < 0) || (fuse_type > 5) ) { printf("fuse type needs to be 0 (None), 1 (Bias), 2 (ReLU), 3 (Sigmoid), 4 (Bias+ReLU), 5 (Bias+Sigmoid)\n"); return -1; } #if defined(__SSE3__) _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST); #endif in_dt = LIBXSMM_DATATYPE_F32; out_dt = LIBXSMM_DATATYPE_F32; comp_dt = LIBXSMM_DATATYPE_F32; /* print some summary */ if (rank == 0 ) { printf("##########################################\n"); printf("# Setting Up (Common) #\n"); printf("##########################################\n"); printf("PARAMS: N:%d\n", global_MB); printf("PARAMS: Layers: %d\n", num_layers); 
printf("PARAMS: ITERS:%d", iters); if (LIBXSMM_FEQ(0, check)) printf(" Threads:%d\n", nThreads); else printf("\n"); for (i = 0; i < num_layers; ++i ) { if (i == 0) { act_size += (double)(global_MB*C[i]*sizeof(float))/(1024.0*1024.0); printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i, global_MB, C[i], (double)(global_MB*C[i]*sizeof(float))/(1024.0*1024.0) ); } act_size += (double)(global_MB*C[i+1]*sizeof(float))/(1024.0*1024.0); fil_size += (double)(C[i]*C[i+1]*sizeof(float))/(1024.0*1024.0); printf("SIZE Filter %i (%dx%d): %10.2f MiB\n", i, C[i], C[i+1], (double)(C[i]*C[i+1]*sizeof(float))/(1024.0*1024.0) ); printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i+1, global_MB, C[i+1], (double)(global_MB*C[i+1]*sizeof(float))/(1024.0*1024.0) ); } act_size += (double)(global_MB*C[num_layers+1]*sizeof(float))/(1024.0*1024.0); printf("SIZE Activations softmax (%dx%d): %10.2f MiB\n", global_MB, C[num_layers+1], (double)(global_MB*C[num_layers+1]*sizeof(float))/(1024.0*1024.0) ); printf("\nTOTAL SIZE Activations: %10.2f MiB\n", act_size ); printf("TOTAL SIZE Filter: %10.2f MiB\n", fil_size ); printf("TOTAL SIZE delActivations: %10.2f MiB\n", act_size ); printf("TOTAL SIZE delFilter: %10.2f MiB\n", fil_size ); printf("TOTAL SIZE MLP: %10.2f MiB\n", (2.0*fil_size) + (2.0*act_size) ); } /* allocate data */ /* +2 because of the softwax layer */ act_libxsmm = (float**)malloc( (num_layers+2)*sizeof(float*) ); ref_act_libxsmm = (float**)malloc( (num_layers+2)*sizeof(float*) ); delact_libxsmm = (float**)malloc( (num_layers+1)*sizeof(float*) ); ref_delact_libxsmm = (float**)malloc( (num_layers+1)*sizeof(float*) ); for ( i = 0 ; i < num_layers+2; ++i ) { act_libxsmm[i] = (float*)libxsmm_aligned_malloc( MB*C[i]*sizeof(float), 2097152); /* softmax has no incoming gradients */ if ( i < num_layers+1 ) { delact_libxsmm[i] = (float*)libxsmm_aligned_malloc( MB*C[i]*sizeof(float), 2097152); } } fil_libxsmm = (float**)malloc( num_layers*sizeof(float*) ); delfil_libxsmm = (float**)malloc( 
num_layers*sizeof(float*) ); for ( i = 0 ; i < num_layers; ++i ) { fil_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(float), 2097152); delfil_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(float), 2097152); } bias_libxsmm = (float**)malloc( num_layers*sizeof(float*) ); delbias_libxsmm = (float**)malloc( num_layers*sizeof(float*) ); for ( i = 0 ; i < num_layers; ++i ) { bias_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i+1]*sizeof(float), 2097152); delbias_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i+1]*sizeof(float), 2097152); } relumask_libxsmm = (unsigned char**)malloc( num_layers*sizeof(unsigned char*) ); for ( i = 0 ; i < num_layers; ++i ) { relumask_libxsmm[i] = (unsigned char*)libxsmm_aligned_malloc( MB*C[i+1]*sizeof(unsigned char), 2097152); } label_libxsmm = (int*)libxsmm_aligned_malloc( MB*sizeof(int), 2097152); /* init data on every node for numa awarness */ for ( i = 0 ; i < num_layers+2; ++i ) { my_init_buf_mlp( act_libxsmm[i], MB*C[i], 0, 0 ); } for ( i = 0 ; i < num_layers; ++i ) { my_init_buf_mlp( fil_libxsmm[i], C[i]*C[i+1], 0, 0 ); } for ( i = 0 ; i < num_layers; ++i ) { my_init_buf_mlp( bias_libxsmm[i], C[i+1], 0, 0 ); } for ( i = 0 ; i < num_layers+1; ++i ) { my_init_buf_mlp( delact_libxsmm[i], MB*C[i], 0, 0 ); } /* Serial initialization of data on proc 0 */ if (rank == 0) { for ( i = 0 ; i < num_layers+2; ++i ) { ref_act_libxsmm[i] = (float*)libxsmm_aligned_malloc( global_MB*C[i]*sizeof(float), 2097152); /* softmax has no incoming gradients */ if ( i < num_layers+1 ) { ref_delact_libxsmm[i] = (float*)libxsmm_aligned_malloc( global_MB*C[i]*sizeof(float), 2097152); } } /* init data */ for ( i = 0 ; i < num_layers+2; ++i ) { my_init_buf_mlp( ref_act_libxsmm[i], global_MB*C[i], 0, 0 ); } for ( i = 0 ; i < num_layers; ++i ) { my_init_buf_mlp( fil_libxsmm[i], C[i]*C[i+1], 0, 0 ); } for ( i = 0 ; i < num_layers; ++i ) { my_init_buf_mlp( bias_libxsmm[i], C[i+1], 0, 0 ); } for ( i = 0 ; i < num_layers+1; ++i ) { 
my_init_buf_mlp( ref_delact_libxsmm[i], global_MB*C[i], 0, 0 ); } } /* Scatter the activations to all processes */ for ( i = 0 ; i < num_layers+2; ++i ) { MPI_Scatter(ref_act_libxsmm[i], MB * C[i], MPI_FLOAT, act_libxsmm[i], MB * C[i], MPI_FLOAT, 0, MPI_COMM_WORLD); } /* Scatter the del_activations to all processes */ for ( i = 0 ; i < num_layers+1; ++i ) { MPI_Scatter(ref_delact_libxsmm[i], MB * C[i], MPI_FLOAT, delact_libxsmm[i], MB * C[i], MPI_FLOAT, 0, MPI_COMM_WORLD); } /* Now broadcast weights tensors */ for ( i = 0 ; i < num_layers; ++i ) { MPI_Bcast(fil_libxsmm[i], C[i]*C[i+1], MPI_FLOAT, 0, MPI_COMM_WORLD); } /* Now broadcast bias tensors */ for ( i = 0 ; i < num_layers; ++i ) { MPI_Bcast(bias_libxsmm[i], C[i], MPI_FLOAT, 0, MPI_COMM_WORLD); } if (rank == 0) { printf("\n"); printf("##########################################\n"); printf("# Setting Up (custom-Storage) #\n"); printf("##########################################\n"); } if ( fuse_type == 0 ) { my_fuse = LIBXSMM_DNN_FC_ELTW_FUSE_NONE; } else if ( fuse_type == 1 ) { my_fuse = LIBXSMM_DNN_FC_ELTW_FUSE_BIAS; } else if ( fuse_type == 2 ) { my_fuse = LIBXSMM_DNN_FC_ELTW_FUSE_RELU_WITH_MASK; } else if ( fuse_type == 3 ) { my_fuse = LIBXSMM_DNN_FC_ELTW_FUSE_BIAS_RELU_WITH_MASK; } else { my_fuse = LIBXSMM_DNN_FC_ELTW_FUSE_NONE; } /* allocating handles */ libxsmm_dnn_fc_fwd = (libxsmm_dnn_fc_fwd_config*) malloc( num_layers*sizeof(libxsmm_dnn_fc_fwd_config) ); libxsmm_dnn_fc_bwd = (libxsmm_dnn_fc_bwd_config*) malloc( num_layers*sizeof(libxsmm_dnn_fc_bwd_config) ); libxsmm_dnn_opt = (libxsmm_dnn_opt_config*) malloc( num_layers*sizeof(libxsmm_dnn_opt_config) ); /* setting up handles + scratch */ for ( i = 0; i < num_layers; ++i ) { libxsmm_dnn_fc_fwd[i] = setup_libxsmm_dnn_fc_fwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB, (C[i ] % bc == 0) ? bc : C[i ], (C[i+1] % bk == 0) ? 
bk : C[i+1], nThreads, my_fuse, in_dt, out_dt, comp_dt ); libxsmm_dnn_fc_bwd[i] = setup_libxsmm_dnn_fc_bwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB, (C[i ] % bc == 0) ? bc : C[i ], (C[i+1] % bk == 0) ? bk : C[i+1], n_comp_threads, my_fuse, in_dt, out_dt, comp_dt ); libxsmm_dnn_opt[i] = setup_libxsmm_dnn_opt( C[i], C[i+1], (C[i ] % bc == 0) ? bc : C[i ], (C[i+1] % bk == 0) ? bk : C[i+1], n_comm_threads, lr, in_dt, out_dt, comp_dt ); /* let's allocate and bind scratch */ if ( libxsmm_dnn_fc_fwd[i].scratch_size > 0 || libxsmm_dnn_fc_bwd[i].scratch_size > 0 || libxsmm_dnn_opt[i].scratch_size > 0 ) { size_t alloc_size = LIBXSMM_MAX( LIBXSMM_MAX( libxsmm_dnn_fc_fwd[i].scratch_size, libxsmm_dnn_fc_bwd[i].scratch_size), libxsmm_dnn_opt[i].scratch_size ); if ( alloc_size > scratch_size ) { if ( scratch != NULL ) libxsmm_free( scratch ); scratch_size = alloc_size; scratch = libxsmm_aligned_malloc( scratch_size, 2097152 ); my_init_buf_mlp( (float*)(scratch), (scratch_size)/4, 0, 0 ); } } } /* softmax+loss is treated as N+! layer */ libxsmm_dnn_smax_fwd = setup_libxsmm_dnn_smax_fwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB, (C[num_layers+1] % bk == 0) ? bk : C[num_layers+1], nThreads, in_dt, out_dt, comp_dt ); libxsmm_dnn_smax_bwd = setup_libxsmm_dnn_smax_bwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB, (C[num_layers+1] % bk == 0) ? 
bk : C[num_layers+1], nThreads, loss_weight, in_dt, out_dt, comp_dt ); if ( libxsmm_dnn_smax_fwd.scratch_size > 0 || libxsmm_dnn_smax_bwd.scratch_size > 0 ) { size_t alloc_size = LIBXSMM_MAX( libxsmm_dnn_smax_fwd.scratch_size, libxsmm_dnn_smax_bwd.scratch_size ); if ( alloc_size > scratch_size ) { if ( scratch != NULL ) libxsmm_free( scratch ); scratch_size = alloc_size; scratch = libxsmm_aligned_malloc( scratch_size, 2097152 ); my_init_buf_mlp( (float*)(scratch), (scratch_size)/4, 0, 0 ); } } if (type == 'F') { if (rank == 0) { printf("##########################################\n"); printf("# Performance - FWD (custom-Storage) #\n"); printf("##########################################\n"); } MPI_Barrier(MPI_COMM_WORLD); l_start = libxsmm_timer_tick(); #if defined(_OPENMP) # pragma omp parallel private(i,j) #endif { #if defined(_OPENMP) const int tid = omp_get_thread_num(); #else const int tid = 0; #endif for (j = 0; j < iters; ++j) { for ( i = 0; i < num_layers; ++i) { libxsmm_dnn_fc_fwd_exec_f32( libxsmm_dnn_fc_fwd[i], fil_libxsmm[i], act_libxsmm[i], act_libxsmm[i+1], bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch ); } libxsmm_dnn_smax_fwd_exec_f32( libxsmm_dnn_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm, &loss_weight, 0, tid, scratch ); } } MPI_Barrier(MPI_COMM_WORLD); l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); gflop = 0.0; for ( i = 0; i < num_layers; ++i) { gflop += (2.0*(double)global_MB*(double)C[i]*(double)C[i+1]*(double)iters) / (1000.0*1000.0*1000.0); } if (rank == 0) { printf("GFLOP = %.5g\n", gflop/(double)iters); printf("fp time = %.5g\n", ((double)(l_total/iters))); printf("GFLOPS = %.5g\n", gflop/l_total); printf("PERFDUMP,FP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB ); for ( i = 0; i < num_layers; ++i ) { printf("%i,", C[i] ); } printf("%f,%f\n", ((double)(l_total/iters)), gflop/l_total); } } if (type == 'B') { if (rank == 0) { 
printf("##########################################\n"); printf("# Performance - BWD (custom-Storage) #\n"); printf("##########################################\n"); } MPI_Barrier(MPI_COMM_WORLD); l_start = libxsmm_timer_tick(); #if defined(_OPENMP) # pragma omp parallel private(i,j) #endif { #if defined(_OPENMP) const int tid = omp_get_thread_num(); #else const int tid = 0; #endif int tid_comm = tid - n_comp_threads; for (j = 0; j < iters; ++j) { libxsmm_dnn_smax_bwd_exec_f32( libxsmm_dnn_smax_bwd, delact_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm, 0, tid, scratch ); for ( i = num_layers-1; i > 0; --i) { if (tid < n_comp_threads) { libxsmm_dnn_fc_bwd_exec_f32( libxsmm_dnn_fc_bwd[i], fil_libxsmm[i], delact_libxsmm[i], delact_libxsmm[i+1], delfil_libxsmm[i], act_libxsmm[i], delbias_libxsmm[i], relumask_libxsmm[i], LIBXSMM_DNN_FC_PASS_BWD, 0, tid, scratch ); } #pragma omp barrier if (tid >= n_comp_threads) { int n_elts = (C[i]*C[i+1])/n_comm_threads; MPI_Allreduce(MPI_IN_PLACE, (float*)delfil_libxsmm[i]+tid_comm*n_elts, n_elts, MPI_FLOAT, MPI_SUM, comms[tid_comm]); libxsmm_dnn_opt_exec_f32( libxsmm_dnn_opt[i], fil_libxsmm[i], delfil_libxsmm[i], 0, tid_comm, scratch ); } } /* Only UPD pass for first layer */ if (tid < n_comp_threads) { libxsmm_dnn_fc_bwd_exec_f32( libxsmm_dnn_fc_bwd[0], fil_libxsmm[0], delact_libxsmm[0], delact_libxsmm[0+1], delfil_libxsmm[0], act_libxsmm[0], delbias_libxsmm[0], relumask_libxsmm[0], LIBXSMM_DNN_FC_PASS_BWD_W, 0, tid, scratch ); } #pragma omp barrier if (tid >= n_comp_threads) { int n_elts = (C[0]*C[1])/n_comm_threads; MPI_Allreduce(MPI_IN_PLACE, (float*)delfil_libxsmm[0]+tid_comm*n_elts, n_elts, MPI_FLOAT, MPI_SUM, comms[tid_comm]); libxsmm_dnn_opt_exec_f32( libxsmm_dnn_opt[0], fil_libxsmm[0], delfil_libxsmm[0], 0, tid_comm, scratch ); } #pragma omp barrier } } MPI_Barrier(MPI_COMM_WORLD); l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); gflop = 0.0; for ( i = num_layers-1; i > 0; --i) { 
gflop += (4.0*(double)global_MB*(double)C[i]*(double)C[i+1]*(double)iters) / (1000.0*1000.0*1000.0); } gflop += (2.0*(double)global_MB*(double)C[0]*(double)C[1]*(double)iters) / (1000.0*1000.0*1000.0); if (rank == 0) { printf("GFLOP = %.5g\n", gflop/(double)iters); printf("fp time = %.5g\n", ((double)(l_total/iters))); printf("GFLOPS = %.5g\n", gflop/l_total); printf("PERFDUMP,BP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB ); for ( i = 0; i < num_layers; ++i ) { printf("%i,", C[i] ); } printf("%f,%f\n", ((double)(l_total/iters)), gflop/l_total); } MPI_Barrier(MPI_COMM_WORLD); #if 1 if (rank == n_procs - 1) { for ( i = 0 ; i < num_layers; ++i ) { libxsmm_matdiff(&norms, LIBXSMM_DATATYPE_F32, C[i]*C[i+1], 1, delfil_libxsmm[i], delfil_libxsmm[i], 0, 0); printf("L1 of layer's %d dweights after training : %.25g\n", i, norms.l1_ref); libxsmm_matdiff_clear(&norms); } } #endif } if (type == 'A') { if (rank == 0) { printf("##########################################\n"); printf("# Performance - FWD-BWD (custom-Storage) #\n"); printf("##########################################\n"); } MPI_Barrier(MPI_COMM_WORLD); l_start = libxsmm_timer_tick(); #if defined(_OPENMP) # pragma omp parallel private(i,j) #endif { #if defined(_OPENMP) const int tid = omp_get_thread_num(); #else const int tid = 0; #endif int tid_comm = tid - n_comp_threads; unsigned long long t0, t1; for (j = 0; j < iters; ++j) { #ifdef DETAILED_PROFILE if (tid == 0) { t0 = libxsmm_timer_tick(); } #endif for ( i = 0; i < num_layers; ++i) { libxsmm_dnn_fc_fwd_exec_f32( libxsmm_dnn_fc_fwd[i], fil_libxsmm[i], act_libxsmm[i], act_libxsmm[i+1], bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch ); } #ifdef DETAILED_PROFILE if (tid == 0) { t1 = libxsmm_timer_tick(); l_fwd_fc[0] += libxsmm_timer_duration(t0, t1); t0 = libxsmm_timer_tick(); } #endif libxsmm_dnn_smax_fwd_exec_f32( libxsmm_dnn_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm, &loss_weight, 0, tid, scratch ); #ifdef DETAILED_PROFILE 
if (tid == 0) { t1 = libxsmm_timer_tick(); l_fwd_loss[0] += libxsmm_timer_duration(t0, t1); t0 = libxsmm_timer_tick(); } #endif libxsmm_dnn_smax_bwd_exec_f32( libxsmm_dnn_smax_bwd, delact_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm, 0, tid, scratch ); #ifdef DETAILED_PROFILE if (tid == 0) { t1 = libxsmm_timer_tick(); l_bwd_loss[0] += libxsmm_timer_duration(t0, t1); } #endif for ( i = num_layers-1; i > 0; --i) { if (tid < n_comp_threads) { #ifdef DETAILED_PROFILE if (tid == 0) { t0 = libxsmm_timer_tick(); } #endif libxsmm_dnn_fc_bwd_exec_f32( libxsmm_dnn_fc_bwd[i], fil_libxsmm[i], delact_libxsmm[i], delact_libxsmm[i+1], delfil_libxsmm[i], act_libxsmm[i], delbias_libxsmm[i], relumask_libxsmm[i], LIBXSMM_DNN_FC_PASS_BWD, 0, tid, scratch ); #ifdef DETAILED_PROFILE if (tid == 0) { t1 = libxsmm_timer_tick(); l_bwdupd_fc[0] += libxsmm_timer_duration(t0, t1); if (i == num_layers-1) { first_bwdupd_compute += libxsmm_timer_duration(t0, t1); } } #endif } #pragma omp barrier if (tid >= n_comp_threads) { #ifdef DETAILED_PROFILE if (tid == n_comp_threads) { t0 = libxsmm_timer_tick(); } #endif int n_elts = (C[i]*C[i+1])/n_comm_threads; MPI_Allreduce(MPI_IN_PLACE, (float*)delfil_libxsmm[i]+tid_comm*n_elts, n_elts, MPI_FLOAT, MPI_SUM, comms[tid_comm]); #ifdef DETAILED_PROFILE if (tid == n_comp_threads) { t1 = libxsmm_timer_tick(); l_allreduce[0] += libxsmm_timer_duration(t0, t1); t0 = libxsmm_timer_tick(); } #endif libxsmm_dnn_opt_exec_f32( libxsmm_dnn_opt[i], fil_libxsmm[i], delfil_libxsmm[i], 0, tid_comm, scratch ); #ifdef DETAILED_PROFILE if (tid == n_comp_threads) { t1 = libxsmm_timer_tick(); l_optimizer[0] += libxsmm_timer_duration(t0, t1); } #endif } } /* Only UPD pass for first layer */ if (tid < n_comp_threads) { #ifdef DETAILED_PROFILE if (tid == 0) { t0 = libxsmm_timer_tick(); } #endif libxsmm_dnn_fc_bwd_exec_f32( libxsmm_dnn_fc_bwd[0], fil_libxsmm[0], delact_libxsmm[0], delact_libxsmm[0+1], delfil_libxsmm[0], act_libxsmm[0], delbias_libxsmm[0], 
relumask_libxsmm[0], LIBXSMM_DNN_FC_PASS_BWD_W, 0, tid, scratch ); #ifdef DETAILED_PROFILE if (tid == 0) { t1 = libxsmm_timer_tick(); l_bwdupd_fc[0] += libxsmm_timer_duration(t0, t1); } #endif } #pragma omp barrier if (tid >= n_comp_threads) { #ifdef DETAILED_PROFILE if (tid == n_comp_threads) { t0 = libxsmm_timer_tick(); } #endif int n_elts = (C[0]*C[1])/n_comm_threads; MPI_Allreduce(MPI_IN_PLACE, (float*)delfil_libxsmm[0]+tid_comm*n_elts, n_elts, MPI_FLOAT, MPI_SUM, comms[tid_comm]); #ifdef DETAILED_PROFILE if (tid == n_comp_threads) { t1 = libxsmm_timer_tick(); l_allreduce[1] += libxsmm_timer_duration(t0, t1); t0 = libxsmm_timer_tick(); } #endif libxsmm_dnn_opt_exec_f32( libxsmm_dnn_opt[0], fil_libxsmm[0], delfil_libxsmm[0], 0, tid_comm, scratch ); #ifdef DETAILED_PROFILE if (tid == n_comp_threads) { t1 = libxsmm_timer_tick(); l_optimizer[1] += libxsmm_timer_duration(t0, t1); } #endif } #pragma omp barrier } } MPI_Barrier(MPI_COMM_WORLD); l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); gflop = 0.0; for ( i = num_layers-1; i > 0; --i) { gflop += (6.0*(double)global_MB*(double)C[i]*(double)C[i+1]*(double)iters) / (1000.0*1000.0*1000.0); } gflop += (4.0*(double)global_MB*(double)C[0]*(double)C[1]*(double)iters) / (1000.0*1000.0*1000.0); if (rank == 0) { printf("GFLOP = %.5g\n", gflop/(double)iters); printf("fp time = %.5g\n", ((double)(l_total/iters))); printf("GFLOPS = %.5g\n", gflop/l_total); printf("PERFDUMP,BP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB ); for ( i = 0; i < num_layers; ++i ) { printf("%i,", C[i] ); } printf("%f,%f\n", ((double)(l_total/iters)), gflop/l_total); #ifdef DETAILED_PROFILE double tot = /*l_allreduce[0] + l_optimizer[0] +*/ l_fwd_fc[0] + l_bwdupd_fc[0] + l_fwd_loss[0] + l_bwd_loss[0]; printf("FC time compute/loss = %.5g\n", ((double)(tot/iters))); tot = l_allreduce[0] + l_optimizer[0]; printf("All-reduce + optimizer time overlaped = %.5g\n", ((double)(tot/iters))); printf("Bwdupd compute time overlaped = 
%.5g\n", ((double)((l_bwdupd_fc[0]-first_bwdupd_compute)/iters))); tot = l_optimizer[0] ; printf("Optimizer time= %.5g\n", ((double)(tot/iters))); tot = l_fwd_fc[0] + LIBXSMM_MAX( l_bwdupd_fc[0] - first_bwdupd_compute, l_allreduce[0] + l_optimizer[0]) + first_bwdupd_compute + l_fwd_loss[0] + l_bwd_loss[0] + l_allreduce[1] + l_optimizer[1]; printf("Total time on critical path = %.5g (exposed all_reduce + optimizer = %.5g) \n", ((double)(tot/iters)), (double)((l_allreduce[1] + l_optimizer[1])/iters)); #endif } MPI_Barrier(MPI_COMM_WORLD); #if 0 if (rank == n_procs - 1) { for ( i = 0 ; i < num_layers; ++i ) { libxsmm_matdiff(&norms, LIBXSMM_DATATYPE_F32, C[i]*C[i+1], 1, delfil_libxsmm[i], delfil_libxsmm[i], 0, 0); printf("L1 of layer's %d dweights after training : %.25g\n", i, norms.l1_ref); libxsmm_matdiff_clear(&norms); } } #endif } /* deallocate data */ if ( scratch != NULL ) { libxsmm_free(scratch); } for ( i = 0; i < num_layers; ++i ) { if ( i == 0 ) { libxsmm_free(act_libxsmm[i]); libxsmm_free(delact_libxsmm[i]); } libxsmm_free(act_libxsmm[i+1]); libxsmm_free(delact_libxsmm[i+1]); libxsmm_free(fil_libxsmm[i]); libxsmm_free(delfil_libxsmm[i]); libxsmm_free(bias_libxsmm[i]); libxsmm_free(delbias_libxsmm[i]); libxsmm_free(relumask_libxsmm[i]); } libxsmm_free(act_libxsmm[num_layers+1]); libxsmm_free(label_libxsmm); free( act_libxsmm ); free( delact_libxsmm ); free( fil_libxsmm ); free( delfil_libxsmm ); free( bias_libxsmm ); free( delbias_libxsmm ); free( relumask_libxsmm ); free( libxsmm_dnn_opt ); free( libxsmm_dnn_fc_fwd ); free( libxsmm_dnn_fc_bwd ); free( C ); if (rank == 0) { for ( i = 0 ; i < num_layers+2; ++i ) { libxsmm_free(ref_act_libxsmm[i]); } free(ref_act_libxsmm); for ( i = 0 ; i < num_layers+1; ++i ) { libxsmm_free(ref_delact_libxsmm[i]); } free(ref_delact_libxsmm); } /* Finalize the MPI environment */ MPI_Finalize(); return 0; }
GB_subassign_17.c
//------------------------------------------------------------------------------
// GB_subassign_17: C(I,J)<!M,repl> = scalar ; using S
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Method 17: C(I,J)<!M,repl> = scalar ; using S

// M: present
// Mask_comp: true
// C_replace: true
// accum: NULL
// A: scalar
// S: constructed

// C: not bitmap
// M: not bitmap

#include "GB_subassign_methods.h"

// Assign one scalar to the C(I,J) submatrix under a complemented mask (!M)
// with C_replace true, using the symbolic matrix S = C(I,J).  Existing
// entries that must be removed are flagged as zombies; new entries are
// queued as pending tuples.  Both are resolved later by the caller's
// wait/assembly phase (the GB_* worker macros used below are defined in
// GB_subassign_methods.h).
GrB_Info GB_subassign_17
(
    GrB_Matrix C,
    // input:
    const GrB_Index *I,
    const int64_t ni,
    const int64_t nI,
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,
    const int64_t nj,
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    const GrB_Matrix M,
    const bool Mask_struct,
    const void *scalar,
    const GrB_Type atype,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (!GB_IS_BITMAP (C)) ;
    ASSERT (!GB_IS_FULL (C)) ;
    ASSERT (!GB_aliased (C, M)) ;   // NO ALIAS of C==M

    //--------------------------------------------------------------------------
    // S = C(I,J)
    //--------------------------------------------------------------------------

    // GB_EMPTY_TASKLIST presumably declares the task list and the shared
    // phase-1/phase-2 locals (nzombies, task_pending, ...) -- see
    // GB_subassign_methods.h.
    GB_EMPTY_TASKLIST ;
    GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ;

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GB_MATRIX_WAIT_IF_JUMBLED (M) ;
    GB_GET_C ;          // C must not be bitmap
    const int64_t Cnvec = C->nvec ;
    const int64_t *restrict Ch = C->h ;
    const int64_t *restrict Cp = C->p ;
    const bool C_is_hyper = (Ch != NULL) ;
    GB_GET_MASK ;
    GB_GET_SCALAR ;
    GB_GET_S ;
    GrB_BinaryOp accum = NULL ;

    //--------------------------------------------------------------------------
    // Method 17: C(I,J)<!M,repl> = scalar ; using S
    //--------------------------------------------------------------------------

    // Time: Close to optimal; must visit all IxJ, so Omega(|I|*|J|) is
    // required.  The sparsity of !M cannot be exploited.

    // Methods 13, 15, 17, and 19 are very similar.

    //--------------------------------------------------------------------------
    // Parallel: all IxJ (Methods 01, 03, 13, 15, 17, 19)
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_IXJ_SLICE ;

    //--------------------------------------------------------------------------
    // phase 1: create zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------

    // each task accumulates its own zombie count; reduced across tasks
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t j = kfirst ; j <= klast ; j++)
        {

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

            //------------------------------------------------------------------
            // get S(iA_start:end,j) and M(iA_start:end,j)
            //------------------------------------------------------------------

            GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
            GB_GET_VECTOR_FOR_IXJ (M, iA_start) ;

            //------------------------------------------------------------------
            // C(I(iA_start,iA_end-1),jC)<!M,repl> = scalar
            //------------------------------------------------------------------

            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {

                //--------------------------------------------------------------
                // Get the indices at the top of each list.
                //--------------------------------------------------------------

                int64_t iS = (pS < pS_end) ? GBI (Si, pS, Svlen) : INT64_MAX ;
                int64_t iM = (pM < pM_end) ? GBI (Mi, pM, Mvlen) : INT64_MAX ;

                //--------------------------------------------------------------
                // find the smallest index of [iS iA iM] (always iA)
                //--------------------------------------------------------------

                int64_t i = iA ;

                //--------------------------------------------------------------
                // get M(i,j)
                //--------------------------------------------------------------

                bool mij ;
                if (i == iM)
                {
                    // mij = (bool) M [pM]
                    mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
                    GB_NEXT (M) ;
                }
                else
                {
                    // mij not present, implicitly false
                    ASSERT (i < iM) ;
                    mij = false ;
                }

                // complement the mask entry mij since Mask_comp is true
                mij = !mij ;

                //--------------------------------------------------------------
                // assign the entry
                //--------------------------------------------------------------

                if (i == iS)
                {
                    ASSERT (i == iA) ;
                    {
                        // both S (i,j) and A (i,j) present
                        GB_C_S_LOOKUP ;
                        if (mij)
                        {
                            // ----[C A 1] or [X A 1]---------------------------
                            // [C A 1]: action: ( =A ): copy A, no accum
                            // [X A 1]: action: ( undelete ): zombie lives
                            GB_noaccum_C_A_1_scalar ;
                        }
                        else
                        {
                            // ----[C A 0] or [X A 0]---------------------------
                            // [X A 0]: action: ( X ): still a zombie
                            // [C A 0]: C_repl: action: ( delete ): zombie
                            GB_DELETE_ENTRY ;
                        }
                        GB_NEXT (S) ;
                    }
                }
                else
                {
                    ASSERT (i == iA) ;
                    {
                        // S (i,j) is not present, A (i,j) is present
                        if (mij)
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            // phase 1 only counts; the actual insertion of the
                            // pending tuple is done in phase 2
                            task_pending++ ;
                        }
                    }
                }
            }
        }

        GB_PHASE1_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------

    // cumulative sum of per-task pending-tuple counts gives each task its
    // slot in the shared pending-tuple list
    GB_PENDING_CUMSUM ;

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(&&:pending_sorted)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t j = kfirst ; j <= klast ; j++)
        {

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

            //------------------------------------------------------------------
            // get S(iA_start:end,j) and M(iA_start:end,j)
            //------------------------------------------------------------------

            GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
            GB_GET_VECTOR_FOR_IXJ (M, iA_start) ;

            //------------------------------------------------------------------
            // C(I(iA_start,iA_end-1),jC)<!M,repl> = scalar
            //------------------------------------------------------------------

            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {

                //--------------------------------------------------------------
                // Get the indices at the top of each list.
                //--------------------------------------------------------------

                int64_t iS = (pS < pS_end) ? GBI (Si, pS, Svlen) : INT64_MAX ;
                int64_t iM = (pM < pM_end) ? GBI (Mi, pM, Mvlen) : INT64_MAX ;

                //--------------------------------------------------------------
                // find the smallest index of [iS iA iM] (always iA)
                //--------------------------------------------------------------

                int64_t i = iA ;

                //--------------------------------------------------------------
                // get M(i,j)
                //--------------------------------------------------------------

                bool mij ;
                if (i == iM)
                {
                    // mij = (bool) M [pM]
                    mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
                    GB_NEXT (M) ;
                }
                else
                {
                    // mij not present, implicitly false
                    ASSERT (i < iM) ;
                    mij = false ;
                }

                // complement the mask entry mij since Mask_comp is true
                mij = !mij ;

                //--------------------------------------------------------------
                // assign the entry
                //--------------------------------------------------------------

                if (i == iS)
                {
                    ASSERT (i == iA) ;
                    {
                        // entry already handled in phase 1; just advance S
                        GB_NEXT (S) ;
                    }
                }
                else
                {
                    ASSERT (i == iA) ;
                    {
                        // S (i,j) is not present, A (i,j) is present
                        if (mij)
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                            GB_PENDING_INSERT (scalar) ;
                        }
                    }
                }
            }
        }

        GB_PHASE2_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_WRAPUP ;
}
2d_simple_v2.c
#include <stdio.h>   /* BUG FIX: printf was called without a declaration */
#include <stdlib.h>
#include <omp.h>

/*
 * Every thread in the parallel region stores 42 into the shared element
 * arr[0][0] and prints it.  The concurrent unsynchronized writes and reads
 * of arr[0][0] are a data race; given the file name (2d_simple_v2) this
 * appears to be an intentional race-detector test case, so the race is
 * deliberately left in place -- do not "fix" it by adding synchronization.
 *
 * Returns 0 on success, EXIT_FAILURE if allocation fails.
 */
int main() {
  int *data = malloc(sizeof *data);
  int **arr = malloc(sizeof *arr);
  if (data == NULL || arr == NULL) {
    /* allocation failure: free(NULL) is a no-op, so this is safe */
    free(data);
    free(arr);
    return EXIT_FAILURE;
  }
  arr[0] = data;
#pragma omp parallel
  {
    arr[0][0] = 42;             /* racy write (same value from all threads) */
    printf("%d\n", arr[0][0]);  /* racy read */
  }
  free(data);
  free(arr);
  return 0;
}
GB_unaryop__abs_int8_int64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_int8_int64
// op(A') function:  GB_tran__abs_int8_int64

// C type:   int8_t
// A type:   int64_t
// cast:     int8_t cij = (int8_t) aij
// unaryop:  cij = GB_IABS (aij)

// macros below configure the generic kernels (GB_unaryop_transpose.c and the
// apply loop) for this specific type pair

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x)   \
    z = GB_IABS (x) ;

// casting
#define GB_CASTING(z, x)   \
    int8_t z = (int8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT8 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise apply: Cx [p] = (int8_t) |Ax [p]| for p in 0..anz-1, using
// nthreads OpenMP threads.  Returns GrB_NO_VALUE if this kernel was
// compiled out (GB_DISABLE), GrB_SUCCESS otherwise.
GrB_Info GB_unop__abs_int8_int64
(
    int8_t *restrict Cx,
    const int64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose loop lives in the shared template
// GB_unaryop_transpose.c, which is specialized here via the GB_* macros
// defined above.
GrB_Info GB_tran__abs_int8_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
symgs.c
//------------------------------------------------------------------------------------------------------------------------------ // Samuel Williams // SWWilliams@lbl.gov // Lawrence Berkeley National Lab //------------------------------------------------------------------------------------------------------------------------------ void smooth(level_type * level, int phi_id, int rhs_id, double a, double b){ fprintf(stderr, "symgs smooth\n"); int box,s; for(s=0;s<2*NUM_SMOOTHS;s++){ // there are two sweeps (forward/backward) per GS smooth exchange_boundary(level,phi_id,stencil_is_star_shaped()); apply_BCs(level,phi_id,stencil_is_star_shaped()); uint64_t _timeStart = CycleTime(); // #pragma omp parallel for private(box) hclib::finish([] { hclib::loop_domain_1d loop(level->num_my_boxes); hclib::forasync_nb(&loop, [] (int box) { int i,j,k; const int ghosts = level->box_ghosts; const int jStride = level->my_boxes[box].jStride; const int kStride = level->my_boxes[box].kStride; const int dim = level->my_boxes[box].dim; const double h2inv = 1.0/(level->h*level->h); double * __restrict__ phi = level->my_boxes[box].vectors[ phi_id] + ghosts*(1+jStride+kStride); // i.e. 
[0] = first non ghost zone point const double * __restrict__ rhs = level->my_boxes[box].vectors[ rhs_id] + ghosts*(1+jStride+kStride); const double * __restrict__ alpha = level->my_boxes[box].vectors[VECTOR_ALPHA ] + ghosts*(1+jStride+kStride); const double * __restrict__ beta_i = level->my_boxes[box].vectors[VECTOR_BETA_I] + ghosts*(1+jStride+kStride); const double * __restrict__ beta_j = level->my_boxes[box].vectors[VECTOR_BETA_J] + ghosts*(1+jStride+kStride); const double * __restrict__ beta_k = level->my_boxes[box].vectors[VECTOR_BETA_K] + ghosts*(1+jStride+kStride); const double * __restrict__ Dinv = level->my_boxes[box].vectors[VECTOR_DINV ] + ghosts*(1+jStride+kStride); const double * __restrict__ valid = level->my_boxes[box].vectors[VECTOR_VALID ] + ghosts*(1+jStride+kStride); // cell is inside the domain if( (s&0x1)==0 ){ // forward sweep... hard to thread for(k=0;k<dim;k++){ for(j=0;j<dim;j++){ for(i=0;i<dim;i++){ int ijk = i + j*jStride + k*kStride; double Ax = apply_op_ijk(phi); phi[ijk] = phi[ijk] + Dinv[ijk]*(rhs[ijk]-Ax); }}} }else{ // backward sweep... hard to thread for(k=dim-1;k>=0;k--){ for(j=dim-1;j>=0;j--){ for(i=dim-1;i>=0;i--){ int ijk = i + j*jStride + k*kStride; double Ax = apply_op_ijk(phi); phi[ijk] = phi[ijk] + Dinv[ijk]*(rhs[ijk]-Ax); }}} } }, false, FORASYNC_MODE_FLAT); }); level->cycles.smooth += (uint64_t)(CycleTime()-_timeStart); } // s-loop } //------------------------------------------------------------------------------------------------------------------------------
main.c
/* * Author: Wojciech Graj * License: MIT * Description: A raytracer in C * Libraries used: * cJSON (https://github.com/DaveGamble/cJSON) * SimplexNoise (https://github.com/SRombauts/SimplexNoise) * Setting macros: * MULTITHREADING - enables multithreading * UNBOUND_OBJECTS - allows for unbound objects such as planes * * Certain sections of code adapted from: * https://developer.nvidia.com/blog/thinking-parallel-part-iii-tree-construction-gpu/ * http://people.csail.mit.edu/amy/papers/box-jgt.pdf */ /* * TODO: * Image: * Normalization functions * Denoising * Optimization: * Consider LOD * Other: * Documentation * Port scenes to new format * Update readme gallery after resolving Lighting */ #include <float.h> #include <math.h> #include <stdbool.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include "../lib/cJSON.h" #include "../lib/SimplexNoise.h" /******************************************************************************* * MACRO *******************************************************************************/ #ifdef MULTITHREADING #include <omp.h> int NUM_THREADS = 1; #endif #define clz __builtin_clz #define PI 3.1415927f #define MATERIAL_THRESHOLD 1e-6f #define likely(x) __builtin_expect((x), true) #define unlikely(x) __builtin_expect((x), false) /* ERROR */ #define err_assert(expr, err_code)\ do {\ if (unlikely(!(expr)))\ err(err_code);\ } while (false) #define SPHERICAL_TO_CARTESIAN(radius, inclination, azimuth)\ {radius * cosf(azimuth) * sinf(inclination),\ radius * sinf(azimuth) * sinf(inclination),\ radius * cosf(inclination)} /******************************************************************************* * TYPE DEFINITION *******************************************************************************/ typedef uint8_t Color[3]; typedef float Vec2[2]; typedef float Vec3[3]; typedef Vec3 Mat3[3]; typedef struct Context Context; typedef struct Line Line; /* Camera */ typedef struct Image Image; typedef 
struct Camera Camera; /* BoundingCuboid */ typedef struct BoundingCuboid BoundingCuboid; /* Material */ typedef struct Material Material; typedef struct Texture Texture; typedef struct TextureUniform TextureUniform; typedef struct TextureCheckerboard TextureCheckerboard; typedef struct TextureBrick TextureBrick; typedef struct TextureNoisyPeriodic TextureNoisyPeriodic; /* Object */ typedef struct Object Object; typedef struct Sphere Sphere; typedef struct Triangle Triangle; #ifdef UNBOUND_OBJECTS typedef struct Plane Plane; #endif typedef struct ObjectData ObjectData; /* BVH */ typedef struct BVH BVH; typedef union BVHChild BVHChild; typedef struct BVHWithMorton BVHWithMorton; /******************************************************************************* * GLOBAL *******************************************************************************/ enum directions{ X = 0, Y, Z, }; typedef enum { REFLECTION_PHONG, REFLECTION_BLINN, } ReflectionModel; typedef enum { GLOBAL_ILLUMINATION_AMBIENT, GLOBAL_ILLUMINATION_PATH_TRACING, } GlobalIlluminationModel; typedef enum { LIGHT_ATTENUATION_NONE, LIGHT_ATTENUATION_LINEAR, LIGHT_ATTENUATION_SQUARE, } LightAttenuation; typedef enum { LOG_NONE, LOG_REALTIME, LOG_CPUTIME, } LogOption; typedef enum { PERIODIC_FUNC_SIN, PERIODIC_FUNC_SAW, PERIODIC_FUNC_TRIANGLE, PERIODIC_FUNC_SQUARE, } PeriodicFunction; /* Object */ typedef enum { #ifdef UNBOUND_OBJECTS OBJECT_PLANE, #endif OBJECT_SPHERE, OBJECT_TRIANGLE, } ObjectType; /* ERROR */ typedef enum { ERR_ARGC, ERR_ARGV_FILE_EXT, ERR_ARGV_NUM_THREADS, ERR_ARGV_MULTITHREADING, ERR_ARGV_REFLECTION, ERR_ARGV_GLOBAL_ILLUMINATION, ERR_ARGV_LIGHT_ATTENUATION, ERR_ARGV_UNRECOGNIZED, ERR_ARGV_IO_OPEN_SCENE, ERR_ARGV_IO_OPEN_OUTPUT, ERR_IO_WRITE_IMG, ERR_JSON_KEY_NOT_STRING, ERR_JSON_UNRECOGNIZED_KEY, ERR_JSON_VALUE_NOT_NUMBER, ERR_JSON_VALUE_NOT_ARRAY, ERR_JSON_VALUE_NOT_STRING, ERR_JSON_VALUE_NOT_OBJECT, ERR_JSON_ARRAY_SIZE, ERR_JSON_FILENAME_NOT_STRING, ERR_JSON_IO_OPEN, ERR_JSON_ARGC, 
ERR_JSON_UNRECOGNIZED, ERR_JSON_IO_READ, ERR_JSON_NUM_TOKENS, ERR_JSON_READ_TOKENS, ERR_JSON_FIRST_TOKEN, ERR_JSON_ARGC_SCENE, ERR_JSON_CAMERA_FOV, ERR_JSON_INVALID_MATERIAL, ERR_JSON_EMITTANT, ERR_MALLOC, ERR_JSON_NO_CAMERA, ERR_JSON_NO_OBJECTS, ERR_JSON_NO_MATERIALS, ERR_JSON_NO_LIGHTS, ERR_STL_IO_FP, ERR_STL_IO_READ, ERR_STL_ENCODING, ERR_END, /* Used for determining number of error codes */ } ErrorCode; const char *ERR_FORMAT_STRING = "ERROR:%s\n"; const char *ERR_MESSAGES[ERR_END] = { [ERR_ARGC] = "Too few arguments. Use --help to find out which arguments are required to call this program.", [ERR_ARGV_FILE_EXT] = "ARGV: Output file must have the .ppm extension.", [ERR_ARGV_NUM_THREADS] = "ARGV: Specified number of threads is greater than the available number of threads.", [ERR_ARGV_MULTITHREADING] = "ARGV: Multithreading is disabled. To enable it, recompile the program with the -DMULTITHREADING parameter.", [ERR_ARGV_REFLECTION] = "ARGV: Unrecognized reflection model.", [ERR_ARGV_GLOBAL_ILLUMINATION] ="ARGV: Unrecognized global illumination model.", [ERR_ARGV_LIGHT_ATTENUATION] = "ARGV: Unrecognized light attenuation.", [ERR_ARGV_UNRECOGNIZED] = "ARGV: Unrecognized argument. 
Use --help to find out which arguments can be used.", [ERR_ARGV_IO_OPEN_SCENE] = "ARGV:I/O : Unable to open scene file.", [ERR_ARGV_IO_OPEN_OUTPUT] = "ARGV:I/O : Unable to open output file.", [ERR_IO_WRITE_IMG] = "I/O : Unable to write to image file.", [ERR_JSON_KEY_NOT_STRING] = "JSON: Key is not a string.", [ERR_JSON_UNRECOGNIZED_KEY] = "JSON: Unrecognized key.", [ERR_JSON_VALUE_NOT_NUMBER] = "JSON: Value is not a number.", [ERR_JSON_VALUE_NOT_ARRAY] = "JSON: Value is not an array.", [ERR_JSON_VALUE_NOT_STRING] = "JSON: Value is not a string.", [ERR_JSON_VALUE_NOT_OBJECT] = "JSON: Value is not an object.", [ERR_JSON_ARRAY_SIZE] = "JSON: Array contains an incorrect amount of values.", [ERR_JSON_FILENAME_NOT_STRING] ="JSON: Filename is not string.", [ERR_JSON_IO_OPEN] = "JSON:I/O : Unable to open file specified in JSON file.", [ERR_JSON_ARGC] = "JSON: Object has an incorrect number of elements.", [ERR_JSON_UNRECOGNIZED] = "JSON: Unrecognized element in Object.", [ERR_JSON_IO_READ] = "JSON:I/O : Unable to read file.", [ERR_JSON_NUM_TOKENS] = "JSON: Too many tokens.", [ERR_JSON_READ_TOKENS] = "JSON: Unable to read correct amount of tokens.", [ERR_JSON_FIRST_TOKEN] = "JSON: First token is not Object.", [ERR_JSON_ARGC_SCENE] = "JSON: Unrecognized Object in scene.", [ERR_JSON_CAMERA_FOV] = "JSON: Camera FOV is outside of interval (0, 180).", [ERR_JSON_INVALID_MATERIAL] = "JSON: Material with stated id does not exist.", [ERR_MALLOC] = "MEM : Unable to allocate memory on heap.", [ERR_JSON_NO_CAMERA] = "JSON: Unable to find camera in scene.", [ERR_JSON_NO_OBJECTS] = "JSON: Unable to find objects in scene.", [ERR_JSON_NO_LIGHTS] = "JSON: Unable to find lights in scene.", [ERR_JSON_NO_MATERIALS] = "JSON: Unable to find materials in scene.", [ERR_JSON_EMITTANT] = "JSON: Object cannot be emittant.", [ERR_STL_IO_FP] = "STL :I/O : Unable to move file pointer.", [ERR_STL_IO_READ] = "STL :I/O : Unable to read file.", [ERR_STL_ENCODING] = "STL : File uses ASCII encoding.", }; const 
char *HELPTEXT = "\ Renders a scene using raytracing.\n\ \n\ ./engine <input> <output> <resolution> [OPTIONAL_PARAMETERS]\n\ \n\ REQUIRED PARAMETERS:\n\ <input> (string) : .json scene file which will be used to generate the image. Example files can be found in scenes.\n\ <output> (string) : .ppm file to which the image will be saved.\n\ <resolution> (integer) (integer) : resolution of the output image.\n\ OPTIONAL PARAMETERS:\n\ [-m] (integer | \"max\") : DEFAULT = 1 : number of CPU cores\n\ [-b] (integer) : DEFAULT = 10 : maximum number of times that a light ray can bounce.\n\ [-a] (float) : DEFAULT = 0.01 : minimum light intensity for which a ray is cast\n\ [-s] (\"phong\" | \"blinn\") : DEFAULT = phong : reflection model\n\ [-n] (integer) : DEFAULT = 1 : number of samples which are rendered per pixel\n\ [-c] : DEFAULT = OFF : normalize values of pixels so that entire color spectrum is utilized\n\ [-l] (\"none\" | \"lin\" | \"sqr\") : DEFAULT = sqr : light attenuation\n\ [-p] (\"none\" | \"real\" | \"cpu\") : DEFAULT = real : time to print with status messages\n\ [-g] (string) : DEFAULT = ambient : global illumination model\n\ ambient : ambient lighting\n\ path : path-tracing\n"; /******************************************************************************* * STRUCTURE DEFINITION *******************************************************************************/ typedef struct Context { FILE *scene_file; FILE *output_file; Object **objects; size_t num_objects; size_t objects_size; //Stores capacity of objects. 
After objects are loaded, should be equal to num_objects #ifdef UNBOUND_OBJECTS Object **unbound_objects; //Since planes do not have a bounding cuboid and cannot be included in the BVH, they must be looked at separately size_t num_unbound_objects; #endif Object **emittant_objects; size_t num_emittant_objects; Material *materials; size_t num_materials; Camera *camera; BVH *bvh; struct timespec start_timespec; clock_t start_clock; float light_attenuation_offset; float brightness; Vec3 global_ambient_light_intensity; uint32_t max_bounces; float minimum_light_intensity_sqr; ReflectionModel reflection_model; GlobalIlluminationModel global_illumination_model; LogOption log_option; uint32_t resolution[2]; size_t samples_per_pixel; bool normalize_colors; LightAttenuation light_attenuation; } Context; typedef struct Line { float vector[3]; float position[3]; } Line; typedef struct STLTriangle { float normal[3]; //normal is unreliable so it is not used. float vertices[3][3]; uint16_t attribute_bytes; //attribute bytes is unreliable so it is not used. } __attribute__ ((packed)) STLTriangle; /* Camera */ typedef struct Image { uint32_t resolution[2]; Vec2 size; Vec3 corner; //Top left corner of image Vec3 vectors[2]; //Vectors for image plane traversal by 1 pixel in X and Y directions Color *pixels; } Image; typedef struct Camera { Vec3 position; Vec3 vectors[3]; //vectors are perpendicular to eachother and normalized. vectors[3] is normal to projection_plane. 
float fov; float focal_length; Image image; } Camera; /* BoundingCuboid */ typedef struct BoundingCuboid { float epsilon; Vec3 corners[2]; } BoundingCuboid; /* Material */ typedef struct Material { int32_t id; Vec3 ks; /*specular reflection constant*/ Vec3 ka; /*ambient reflection constant*/ Vec3 kr; /*specular interreflection constant*/ Vec3 kt; /*transparency constant*/ Vec3 ke; /*emittance constant*/ float shininess; /*shininess constant*/ float refractive_index; Texture *texture; bool reflective; bool transparent; bool emittant; } Material; typedef struct Texture { void (*get_color)(const Texture*, const Vec3, Vec3); } Texture; typedef struct TextureUniform { Texture texture; Vec3 color; } TextureUniform; typedef struct TextureCheckerboard { Texture texture; Vec3 colors[2]; float scale; } TextureCheckerboard; typedef struct TextureBrick { Texture texture; Vec3 colors[2]; float scale; float mortar_width; } TextureBrick; typedef struct TextureNoisyPeriodic { Texture texture; Vec3 color; Vec3 color_gradient; float noise_feature_scale; float noise_scale; float frequency_scale; PeriodicFunction func; } TextureNoisyPeriodic; /* Object */ typedef struct Object { ObjectData const *object_data; uint32_t num_lights; float epsilon; Material *material; } Object; typedef struct Sphere { Object object; Vec3 position; float radius; } Sphere; typedef struct Triangle {//triangle ABC Object object; Vec3 vertices[3]; Vec3 edges[2]; //Vectors BA and CA Vec3 normal; } Triangle; #ifdef UNBOUND_OBJECTS typedef struct Plane {//normal = {a,b,c}, ax + by + cz = d Object object; Vec3 normal; float d; } Plane; #endif typedef struct ObjectData { char *name; #ifdef UNBOUND_OBJECTS bool is_bounded; #endif bool (*get_intersection)(const Object*, const Line*, float*, Vec3); bool (*intersects_in_range)(const Object*, const Line*, float); void (*delete)(Object*); BoundingCuboid *(*generate_bounding_cuboid)(const Object*); void (*get_corners)(const Object*, Vec3[2]); void (*scale)(const Object*, 
const Vec3, const float); void (*get_light_point)(const Object*, const Vec3, Vec3); } ObjectData; /* BVH */ typedef union BVHChild { BVH *bvh; Object *object; } BVHChild; typedef struct BVH { bool is_leaf; BoundingCuboid *bounding_cuboid; BVHChild children[]; } BVH; typedef struct BVHWithMorton { //Only used when constructing BVH tree uint32_t morton_code; BVH *bvh; } BVHWithMorton; /******************************************************************************* * FUNCTION PROTOTYPE *******************************************************************************/ /* ALGORITHM */ float sqr(const float val); float mag2(const Vec2 vec); float mag3(const Vec3 vec); float dot2(const Vec2 vec1, const Vec2 vec2); float dot3(const Vec3 vec1, const Vec3 vec2); void cross(const Vec3 vec1, const Vec3 vec2, Vec3 result); void mul2s(const Vec2 vec, const float mul, Vec2 result); void mul3s(const Vec3 vec, const float mul, Vec3 result); void mul3v(const Vec3 vec1, const Vec3 vec2, Vec3 result); void inv3(Vec3 vec); void add2v(const Vec2 vec1, const Vec2 vec2, Vec2 result); void add2s(const Vec2 vec1, const float summand, Vec2 result); void add3v(const Vec3 vec1, const Vec3 vec2, Vec3 result); void add3s(const Vec3 vec1, const float summand, Vec3 result); void add3v3(const Vec3 vec1, const Vec3 vec2, const Vec3 vec3, Vec3 result); void sub2v(const Vec2 vec1, const Vec2 vec2, Vec2 result); void sub2s(const Vec2 vec1, const float subtrahend, Vec2 result); void sub3v(const Vec3 vec1, const Vec3 vec2, Vec3 result); void sub3s(const Vec3 vec1, const float subtrahend, Vec3 result); void norm2(Vec2 vec); void norm3(Vec3 vec); float max3(const Vec3 vec); float min3(const Vec3 vec); float clamp(const float num, const float min, const float max); void clamp3(const Vec3 vec, const Vec3 min, const Vec3 max, Vec3 result); float magsqr3(const Vec3 vec); void mulm3(Mat3 mat, const float *restrict vec, float *restrict result); void mulms(Mat3 mat, const float mul, Mat3 result); float 
rand_flt(void); bool moller_trumbore(const Vec3 vertex, Vec3 edges[2], const Vec3 line_position, const Vec3 line_vector, const float epsilon, float *distance); bool line_intersects_sphere(const Vec3 sphere_position, const float sphere_radius, const Vec3 line_position, const Vec3 line_vector, const float epsilon, float *distance); uint32_t djb_hash(const char* cp); uint32_t expand_bits(uint32_t num); uint32_t morton_code(const Vec3 vec); /* Context */ Context *context_new(void); void context_delete(Context *context); /* Camera */ Camera *camera_new(const Vec3 position, Vec3 vectors[2], const float fov, const float focal_length, Context *context); void camera_delete(Camera *camera); Camera *camera_load(const cJSON *json, Context *context); void save_image(FILE *file, const Image *image); void camera_scale(Camera *camera, const Vec3 neg_shift, const float scale); /* Material */ void material_init(Material *material, const int32_t id, const Vec3 ks, const Vec3 ka, const Vec3 kr, const Vec3 kt, const Vec3 ke, const float shininess, const float refractive_index, Texture * const texture); void material_load(const cJSON *json, Material *material); Material *get_material(const Context *context, const int32_t id); Texture *texture_load(const cJSON *json); void texture_get_color_uniform(const Texture *tex, const Vec3 point, Vec3 color); void texture_get_color_checkerboard(const Texture *tex, const Vec3 point, Vec3 color); void texture_get_color_brick(const Texture *tex, const Vec3 point, Vec3 color); void texture_get_color_noisy_periodic(const Texture *tex, const Vec3 point, Vec3 color); /* Object */ void object_add(Object * const object, Context *context); void object_load(const cJSON *json, const Context *context, Object *object, const ObjectType object_type); #ifdef UNBOUND_OBJECTS void unbound_objects_get_closest_intersection(const Context *context, const Line *ray, Object **closest_object, Vec3 closest_normal, float *closest_distance); bool 
unbound_objects_is_light_blocked(const Context *context, const Line *ray, const float distance, Vec3 light_intensity, const Object *emittant_object); #endif /* Sphere */ void sphere_delete(Object *object); Sphere *sphere_load(const cJSON *json, Context *context); bool sphere_get_intersection(const Object *object, const Line *ray, float *distance, Vec3 normal); bool sphere_intersects_in_range(const Object *object, const Line *ray, const float min_distance); BoundingCuboid *sphere_generate_bounding_cuboid(const Object *object); void sphere_get_corners(const Object *object, Vec3 corners[2]); void sphere_scale(const Object *object, const Vec3 neg_shift, const float scale); void sphere_get_light_point(const Object *object, const Vec3 point, Vec3 light_point); /* Triangle */ void triangle_init(Triangle *triangle); void triangle_delete(Object *object); Triangle *triangle_load(const cJSON *json, Context *context); bool triangle_get_intersection(const Object *object, const Line *ray, float *distance, Vec3 normal); bool triangle_intersects_in_range(const Object *object, const Line *ray, float min_distance); BoundingCuboid *triangle_generate_bounding_cuboid(const Object *object); void triangle_get_corners(const Object *object, Vec3 corners[2]); void triangle_scale(const Object *object, const Vec3 neg_shift, const float scale); void triangle_get_light_point(const Object *object, const Vec3 point, Vec3 light_point); /* Plane */ #ifdef UNBOUND_OBJECTS void plane_delete(Object *object); Plane *plane_load(const cJSON *json, Context *context); bool plane_get_intersection(const Object *object, const Line *ray, float *distance, Vec3 normal); bool plane_intersects_in_range(const Object *object, const Line *ray, float min_distance); void plane_scale(const Object *object, const Vec3 neg_shift, const float scale); #endif /* Mesh */ void mesh_load(const cJSON *json, Context *context); uint32_t stl_get_num_triangles(FILE *file); void stl_load_objects(Context *context, FILE *file, const 
Object *object, const Vec3 position, const Vec3 rot, const float scale); /* BoundingCuboid */ BoundingCuboid *bounding_cuboid_new(const float epsilon, Vec3 corners[2]); void bounding_cuboid_delete(BoundingCuboid *bounding_cuboid); bool bounding_cuboid_intersects(const BoundingCuboid *cuboid, const Line *ray, float *tmax, float *tmin); /* BVH */ BVH *bvh_new(const bool is_leaf, BoundingCuboid *bounding_cuboid); void bvh_delete(BVH *bvh); int bvh_morton_code_compare(const void *p1, const void *p2); BoundingCuboid *bvh_generate_bounding_cuboid_leaf(const BVHWithMorton *leaf_array, const size_t first, const size_t last); BoundingCuboid *bvh_generate_bounding_cuboid_node(const BVH *bvh_left, const BVH *bvh_right); BVH *bvh_generate_node(const BVHWithMorton *leaf_array, const size_t first, const size_t last); void bvh_generate(Context *context); void bvh_get_closest_intersection(const BVH *bvh, const Line *ray, Object **closest_object, Vec3 closest_normal, float *closest_distance); bool bvh_is_light_blocked(const BVH *bvh, const Line *ray, const float distance, Vec3 light_intensity, const Object *emittant_object); #ifdef DEBUG void bvh_print(const BVH *bvh, const uint32_t depth); #endif /* SCENE */ void cJSON_parse_float_array(const cJSON *json, float *array); void scene_load(Context *context); /* MISC */ void err(const ErrorCode error_code); void get_closest_intersection(const Context *context, const Line *ray, Object **closest_object, Vec3 closest_normal, float *closest_distance); bool is_light_blocked(const Context *context, const Line *ray, const float distance, Vec3 light_intensity, const Object *emittant_object); void normalize_scene(Context *context); void cast_ray(const Context *context, const Line *ray, const Vec3 kr, Vec3 color, const uint32_t bounce_count, Object *inside_object); void create_image(const Context *context); void process_arguments(int argc, char *argv[], Context *context); void log_msg(const Context *context, const char *msg); int main(int argc, 
char *argv[]); /* ObjectData */ const ObjectData OBJECT_DATA[] = { #ifdef UNBOUND_OBJECTS [OBJECT_PLANE] = { .name = "Plane", .is_bounded = false, .get_intersection = &plane_get_intersection, .intersects_in_range = &plane_intersects_in_range, .delete = &plane_delete, .scale = &plane_scale, }, #endif [OBJECT_SPHERE] = { .name = "Sphere", #ifdef UNBOUND_OBJECTS .is_bounded = true, #endif .get_intersection = &sphere_get_intersection, .intersects_in_range = &sphere_intersects_in_range, .delete = &sphere_delete, .generate_bounding_cuboid = &sphere_generate_bounding_cuboid, .get_corners = &sphere_get_corners, .scale = &sphere_scale, .get_light_point = &sphere_get_light_point, }, [OBJECT_TRIANGLE] = { .name = "Triangle", #ifdef UNBOUND_OBJECTS .is_bounded = true, #endif .get_intersection = &triangle_get_intersection, .intersects_in_range = &triangle_intersects_in_range, .delete = &triangle_delete, .generate_bounding_cuboid = &triangle_generate_bounding_cuboid, .get_corners = &triangle_get_corners, .scale = &triangle_scale, .get_light_point = &triangle_get_light_point, }, }; /******************************************************************************* * ALGORITHM *******************************************************************************/ __attribute__((const)) float sqr(const float val) { return val * val; } __attribute__((const)) float mag2(const Vec2 vec) { return sqrtf(sqr(vec[X]) + sqr(vec[Y])); } __attribute__((const)) float mag3(const Vec3 vec) { return sqrtf(magsqr3(vec)); } __attribute__((const)) float dot2(const Vec2 vec1, const Vec2 vec2) { return vec1[X] * vec2[X] + vec1[Y] * vec2[Y]; } __attribute__((const)) float dot3(const Vec3 vec1, const Vec3 vec2) { return vec1[X] * vec2[X] + vec1[Y] * vec2[Y] + vec1[Z] * vec2[Z]; } void cross(const Vec3 vec1, const Vec3 vec2, Vec3 result) { result[X] = vec1[Y] * vec2[Z] - vec1[Z] * vec2[Y]; result[Y] = vec1[Z] * vec2[X] - vec1[X] * vec2[Z]; result[Z] = vec1[X] * vec2[Y] - vec1[Y] * vec2[X]; } void mul2s(const Vec2 
vec, const float mul, Vec2 result) { result[X] = vec[X] * mul; result[Y] = vec[Y] * mul; } void mul3s(const Vec3 vec, const float mul, Vec3 result) { result[X] = vec[X] * mul; result[Y] = vec[Y] * mul; result[Z] = vec[Z] * mul; } void mul3v(const Vec3 vec1, const Vec3 vec2, Vec3 result) { result[X] = vec1[X] * vec2[X]; result[Y] = vec1[Y] * vec2[Y]; result[Z] = vec1[Z] * vec2[Z]; } void inv3(Vec3 vec) { vec[X] = 1.f / vec[X]; vec[Y] = 1.f / vec[Y]; vec[Z] = 1.f / vec[Z]; } void add2v(const Vec2 vec1, const Vec2 vec2, Vec2 result) { result[X] = vec1[X] + vec2[X]; result[Y] = vec1[Y] + vec2[Y]; } void add2s(const Vec2 vec1, const float summand, Vec2 result) { result[X] = vec1[X] + summand; result[Y] = vec1[Y] + summand; } void add3v(const Vec3 vec1, const Vec3 vec2, Vec3 result) { result[X] = vec1[X] + vec2[X]; result[Y] = vec1[Y] + vec2[Y]; result[Z] = vec1[Z] + vec2[Z]; } void add3s(const Vec3 vec1, const float summand, Vec3 result) { result[X] = vec1[X] + summand; result[Y] = vec1[Y] + summand; result[Z] = vec1[Z] + summand; } void add3v3(const Vec3 vec1, const Vec3 vec2, const Vec3 vec3, Vec3 result) { result[X] = vec1[X] + vec2[X] + vec3[X]; result[Y] = vec1[Y] + vec2[Y] + vec3[Y]; result[Z] = vec1[Z] + vec2[Z] + vec3[Z]; } void sub2v(const Vec2 vec1, const Vec2 vec2, Vec2 result) { result[X] = vec1[X] - vec2[X]; result[Y] = vec1[Y] - vec2[Y]; } void sub2s(const Vec2 vec1, const float subtrahend, Vec2 result) { result[X] = vec1[X] - subtrahend; result[Y] = vec1[Y] - subtrahend; } void sub3v(const Vec3 vec1, const Vec3 vec2, Vec3 result) { result[X] = vec1[X] - vec2[X]; result[Y] = vec1[Y] - vec2[Y]; result[Z] = vec1[Z] - vec2[Z]; } void sub3s(const Vec3 vec1, const float subtrahend, Vec3 result) { result[X] = vec1[X] - subtrahend; result[Y] = vec1[Y] - subtrahend; result[Z] = vec1[Z] - subtrahend; } void norm2(Vec2 vec) { mul2s(vec, 1.f / mag2(vec), vec); } void norm3(Vec3 vec) { mul3s(vec, 1.f / mag3(vec), vec); } __attribute__((const)) float min3(const Vec3 
vec) { float min = vec[0]; if (min > vec[1]) min = vec[1]; if (min > vec[2]) min = vec[2]; return min; } __attribute__((const)) float max3(const Vec3 vec) { float max = vec[0]; if (max < vec[1]) max = vec[1]; if (max < vec[2]) max = vec[2]; return max; } __attribute__((const)) float clamp(const float num, const float min, const float max) { const float result = num < min ? min : num; return result > max ? max : result; } void clamp3(const Vec3 vec, const Vec3 min, const Vec3 max, Vec3 result) { result[X] = clamp(vec[X], min[X], max[X]); result[Y] = clamp(vec[Y], min[Y], max[Y]); result[Z] = clamp(vec[Z], min[Z], max[Z]); } __attribute__((const)) float magsqr3(const Vec3 vec) { return sqr(vec[X]) + sqr(vec[Y]) + sqr(vec[Z]); } void mulm3(Mat3 mat, const float *restrict vec, float *restrict result) { result[X] = dot3(mat[X], vec); result[Y] = dot3(mat[Y], vec); result[Z] = dot3(mat[Z], vec); } void mulms(Mat3 mat, const float mul, Mat3 result) { mul3s(mat[X], mul, result[X]); mul3s(mat[Y], mul, result[Y]); mul3s(mat[Z], mul, result[Z]); } float rand_flt(void) { return rand() / (float)RAND_MAX; } //Möller–Trumbore intersection algorithm bool moller_trumbore(const Vec3 vertex, Vec3 edges[2], const Vec3 line_position, const Vec3 line_vector, const float epsilon, float *distance) { float a, f, u, v; Vec3 h, s, q; cross(line_vector, edges[1], h); a = dot3(edges[0], h); if (unlikely(a < epsilon && a > -epsilon)) //ray is parallel to line return false; f = 1.f / a; sub3v(line_position, vertex, s); u = f * dot3(s, h); if (u < 0.f || u > 1.f) return false; cross(s, edges[0], q); v = f * dot3(line_vector, q); if (v < 0.f || u + v > 1.f) return false; *distance = f * dot3(edges[1], q); return *distance > epsilon; } bool line_intersects_sphere(const Vec3 sphere_position, const float sphere_radius, const Vec3 line_position, const Vec3 line_vector, const float epsilon, float *distance) { Vec3 relative_position; sub3v(line_position, sphere_position, relative_position); float b = 
-dot3(line_vector, relative_position); float c = dot3(relative_position, relative_position) - sqr(sphere_radius); float det = sqr(b) - c; if (det < 0) //no collision return false; float sqrt_det = sqrtf(det); *distance = b - sqrt_det; if (*distance > epsilon)//if in front of origin of ray return true; *distance = b + sqrt_det; return *distance > epsilon; //check if the further distance is positive } // D. J. Bernstein hash function uint32_t djb_hash(const char* cp) { uint32_t hash = 5381; while (*cp) hash = 33 * hash ^ (uint8_t) *cp++; return hash; } //Expands a number to only use 1 in every 3 bits uint32_t expand_bits(uint32_t num) { num = (num * 0x00010001u) & 0xFF0000FFu; num = (num * 0x00000101u) & 0x0F00F00Fu; num = (num * 0x00000011u) & 0xC30C30C3u; num = (num * 0x00000005u) & 0x49249249u; return num; } //Calcualtes 30-bit morton code for a point in cube [0,1]. Does not perform bounds checking uint32_t morton_code(const Vec3 vec) { return expand_bits((uint32_t)(1023.f * vec[X])) * 4u + expand_bits((uint32_t)(1023.f * vec[Y])) * 2u + expand_bits((uint32_t)(1023.f * vec[Z])); } /******************************************************************************* * Context *******************************************************************************/ Context *context_new(void) { Context *context = malloc(sizeof(Context)); *context = (Context) { .light_attenuation_offset = 1.f, .brightness = 1.f, .max_bounces = 10, .minimum_light_intensity_sqr = .01f * .01f, .global_illumination_model = GLOBAL_ILLUMINATION_AMBIENT, .reflection_model = REFLECTION_PHONG, .log_option = LOG_REALTIME, .samples_per_pixel = 1, .normalize_colors = false, .light_attenuation = LIGHT_ATTENUATION_SQUARE, }; timespec_get(&context->start_timespec, TIME_UTC); context->start_clock = clock(); return context; } void context_delete(Context *context) { size_t i; for (i = 0; i < context->num_objects; i++) context->objects[i]->object_data->delete(context->objects[i]); camera_delete(context->camera); 
bvh_delete(context->bvh); free(context->objects); #ifdef UNBOUND_OBJECTS free(context->unbound_objects); #endif free(context->emittant_objects); free(context->materials); free(context); } /******************************************************************************* * Camera *******************************************************************************/ Camera *camera_new(const Vec3 position, Vec3 vectors[2], const float fov, const float focal_length, Context *context) { Camera *camera = malloc(sizeof(Camera)); camera->fov = fov; camera->focal_length = focal_length; memcpy(camera->position, position, sizeof(Vec3)); memcpy(camera->vectors, vectors, sizeof(Vec3[2])); norm3(camera->vectors[0]); norm3(camera->vectors[1]); cross(camera->vectors[0], camera->vectors[1], camera->vectors[2]); camera->focal_length = focal_length; memcpy(camera->image.resolution, context->resolution, 2 * sizeof(int)); camera->image.size[X] = 2 * focal_length * tanf(fov * PI / 360.f); camera->image.size[Y] = camera->image.size[X] * camera->image.resolution[Y] / camera->image.resolution[X]; camera->image.pixels = malloc(context->resolution[X] * context->resolution[Y] * sizeof(Color)); Vec3 focal_vector, plane_center, corner_offset_vectors[2]; mul3s(camera->vectors[2], camera->focal_length, focal_vector); add3v(focal_vector, camera->position, plane_center); mul3s(camera->vectors[0], camera->image.size[X] / camera->image.resolution[X], camera->image.vectors[0]); mul3s(camera->vectors[1], camera->image.size[Y] / camera->image.resolution[Y], camera->image.vectors[1]); mul3s(camera->image.vectors[X], .5f - camera->image.resolution[X] / 2.f, corner_offset_vectors[X]); mul3s(camera->image.vectors[Y], .5f - camera->image.resolution[Y] / 2.f, corner_offset_vectors[Y]); add3v3(plane_center, corner_offset_vectors[X], corner_offset_vectors[Y], camera->image.corner); return camera; } void camera_delete(Camera *camera) { free(camera->image.pixels); free(camera); } Camera *camera_load(const cJSON *json, 
Context *context) { err_assert(cJSON_GetArraySize(json) == 5, ERR_JSON_ARGC); cJSON *json_position = cJSON_GetObjectItemCaseSensitive(json, "position"), *json_vector_x = cJSON_GetObjectItemCaseSensitive(json, "vector_x"), *json_vector_y = cJSON_GetObjectItemCaseSensitive(json, "vector_y"), *json_fov = cJSON_GetObjectItemCaseSensitive(json, "fov"), *json_focal_length = cJSON_GetObjectItemCaseSensitive(json, "focal_length"); err_assert(cJSON_IsArray(json_position) && cJSON_IsArray(json_vector_x) && cJSON_IsArray(json_vector_y), ERR_JSON_VALUE_NOT_ARRAY); err_assert(cJSON_GetArraySize(json_position) == 3 && cJSON_GetArraySize(json_vector_x) == 3 && cJSON_GetArraySize(json_vector_y) == 3, ERR_JSON_ARRAY_SIZE); err_assert(cJSON_IsNumber(json_fov) && cJSON_IsNumber(json_focal_length), ERR_JSON_VALUE_NOT_NUMBER); float fov = json_fov->valuedouble, focal_length = json_focal_length->valuedouble; Vec3 position, vectors[2]; err_assert(fov > 0.f && fov < 180.f, ERR_JSON_CAMERA_FOV); cJSON_parse_float_array(json_position, position); cJSON_parse_float_array(json_vector_x, vectors[0]); cJSON_parse_float_array(json_vector_y, vectors[1]); return camera_new(position, vectors, fov, focal_length, context); } void camera_scale(Camera *camera, const Vec3 neg_shift, const float scale) { sub3v(camera->position, neg_shift, camera->position); mul3s(camera->position, scale, camera->position); camera->focal_length *= scale; Image *image = &camera->image; mul2s(image->size, scale, image->size); sub3v(image->corner, neg_shift, image->corner); mul3s(image->corner, scale, image->corner); mul3s(image->vectors[0], scale, image->vectors[0]); mul3s(image->vectors[1], scale, image->vectors[1]); } void save_image(FILE *file, const Image *image) { err_assert(fprintf(file, "P6\n%u %u\n255\n", image->resolution[X], image->resolution[Y]) > 0, ERR_IO_WRITE_IMG); size_t num_pixels = image->resolution[X] * image->resolution[Y]; err_assert(fwrite(image->pixels, sizeof(Color), num_pixels, file) == num_pixels, 
ERR_IO_WRITE_IMG); } /******************************************************************************* * BVH *******************************************************************************/ //TODO: remove cuboid from new BVH *bvh_new(const bool is_leaf, BoundingCuboid *bounding_cuboid) { BVH *bvh = malloc(sizeof(BVH) + (is_leaf ? 1 : 2) * sizeof(BVHChild)); bvh->is_leaf = is_leaf; bvh->bounding_cuboid = bounding_cuboid; return bvh; } void bvh_delete(BVH *bvh) { bounding_cuboid_delete(bvh->bounding_cuboid); if (!bvh->is_leaf) { bvh_delete(bvh->children[0].bvh); bvh_delete(bvh->children[1].bvh); } free(bvh); } int bvh_morton_code_compare(const void *p1, const void *p2) { return (int)((BVHWithMorton*)p1)->morton_code - (int)((BVHWithMorton*)p2)->morton_code; } BoundingCuboid *bvh_generate_bounding_cuboid_leaf(const BVHWithMorton *leaf_array, const size_t first, const size_t last) { Vec3 corners[2] = {{FLT_MAX}, {FLT_MIN}}; float epsilon = 0.f; size_t i, j; for (i = first; i <= last; i++) { BoundingCuboid *bounding_cuboid = leaf_array[i].bvh->bounding_cuboid; if (epsilon < bounding_cuboid->epsilon) epsilon = bounding_cuboid->epsilon; for (j = 0; j < 3; j++) { if (bounding_cuboid->corners[0][j] < corners[0][j]) corners[0][j] = bounding_cuboid->corners[0][j]; if (bounding_cuboid->corners[1][j] > corners[1][j]) corners[1][j] = bounding_cuboid->corners[1][j]; } } return bounding_cuboid_new(epsilon, corners); } BoundingCuboid *bvh_generate_bounding_cuboid_node(const BVH *bvh_left, const BVH *bvh_right) { BoundingCuboid *left = bvh_left->bounding_cuboid, *right = bvh_right->bounding_cuboid; float epsilon = fmaxf(left->epsilon, right->epsilon); Vec3 corners[2] = { { fminf(left->corners[0][X], right->corners[0][X]), fminf(left->corners[0][Y], right->corners[0][Y]), fminf(left->corners[0][Z], right->corners[0][Z]), }, { fmaxf(left->corners[1][X], right->corners[1][X]), fmaxf(left->corners[1][Y], right->corners[1][Y]), fmaxf(left->corners[1][Z], right->corners[1][Z]), }, }; 
return bounding_cuboid_new(epsilon, corners);
}

/* Build the BVH subtree for sorted leaves [first, last] (inclusive) with
 * Karras-style top-down construction: find the split index where the highest
 * differing morton-code bit changes, then recurse on each half. Returns the
 * leaf itself when the range has one element. */
BVH *bvh_generate_node(const BVHWithMorton *leaf_array, const size_t first, const size_t last)
{
	if (first == last)
		return leaf_array[first].bvh;
	uint32_t first_code = leaf_array[first].morton_code;
	uint32_t last_code = leaf_array[last].morton_code;
	size_t split;
	if (first_code == last_code) {
		/* identical codes carry no spatial information: split mid-range */
		split = (first + last) / 2;
	} else {
		split = first;
		uint32_t common_prefix = clz(first_code ^ last_code);
		size_t step = last - first;
		/* binary search for the last leaf sharing more than common_prefix
		 * leading bits with first_code */
		do {
			step = (step + 1) >> 1; // exponential decrease
			size_t new_split = split + step; // proposed new position
			if (new_split < last) {
				uint32_t split_code = leaf_array[new_split].morton_code;
				if (first_code ^ split_code) {
					uint32_t split_prefix = clz(first_code ^ split_code);
					if (split_prefix > common_prefix)
						split = new_split; // accept proposal
				}
			}
		} while (step > 1);
	}
	BVH *bvh_left = bvh_generate_node(leaf_array, first, split);
	BVH *bvh_right = bvh_generate_node(leaf_array, split + 1, last);
	BVH *bvh = bvh_new(false, bvh_generate_bounding_cuboid_node(bvh_left, bvh_right));
	bvh->children[0].bvh = bvh_left;
	bvh->children[1].bvh = bvh_right;
	return bvh;
}

/* Build context->bvh over all bounded objects: one leaf per object, ordered
 * by the morton code of its bounding-box center. NOTE(review): morton_code
 * expects coordinates in [0,1] -- presumably guaranteed by normalize_scene;
 * confirm bvh_generate is only called after normalization. */
void bvh_generate(Context *context)
{
#ifdef UNBOUND_OBJECTS
	size_t num_leaves = context->num_objects - context->num_unbound_objects;
#else
	size_t num_leaves = context->num_objects;
#endif
	BVHWithMorton *leaf_array = malloc(sizeof(BVHWithMorton) * num_leaves);
	size_t i, j = 0;
	for (i = 0; i < context->num_objects; i++) {
		Object *object = context->objects[i];
#ifdef UNBOUND_OBJECTS
		if (object->object_data->is_bounded) {
#endif
			BVH *bvh = bvh_new(true, object->object_data->generate_bounding_cuboid(object));
			bvh->children[0].object = object;
			leaf_array[j++].bvh = bvh;
#ifdef UNBOUND_OBJECTS
		}
#endif
	}
	/* key each leaf by the center of its bounding cuboid */
	for (i = 0; i < num_leaves; i++) {
		BoundingCuboid *bounding_cuboid = leaf_array[i].bvh->bounding_cuboid;
		Vec3 norm_position;
		add3v(bounding_cuboid->corners[0], bounding_cuboid->corners[1], norm_position);
		mul3s(norm_position, .5f, norm_position);
leaf_array[i].morton_code = morton_code(norm_position); } qsort(leaf_array, num_leaves, sizeof(BVHWithMorton), &bvh_morton_code_compare); context->bvh = bvh_generate_node(leaf_array, 0, num_leaves - 1); free(leaf_array); } void bvh_get_closest_intersection(const BVH *bvh, const Line *ray, Object **closest_object, Vec3 closest_normal, float *closest_distance) { if (bvh->is_leaf) { Vec3 normal; Object *object = bvh->children[0].object; float distance; if (object->object_data->get_intersection(object, ray, &distance, normal) && distance < *closest_distance) { *closest_distance = distance; *closest_object = object; memcpy(closest_normal, normal, sizeof(Vec3)); } return; } bool intersect_l, intersect_r; float tmin_l, tmin_r, tmax; intersect_l = bounding_cuboid_intersects(bvh->children[0].bvh->bounding_cuboid, ray, &tmax, &tmin_l) && tmin_l < *closest_distance; intersect_r = bounding_cuboid_intersects(bvh->children[1].bvh->bounding_cuboid, ray, &tmax, &tmin_r) && tmin_r < *closest_distance; if (intersect_l && intersect_r) { if (tmin_l < tmin_r) { bvh_get_closest_intersection(bvh->children[0].bvh, ray, closest_object, closest_normal, closest_distance); bvh_get_closest_intersection(bvh->children[1].bvh, ray, closest_object, closest_normal, closest_distance); } else { bvh_get_closest_intersection(bvh->children[1].bvh, ray, closest_object, closest_normal, closest_distance); bvh_get_closest_intersection(bvh->children[0].bvh, ray, closest_object, closest_normal, closest_distance); } } else if (intersect_l) { bvh_get_closest_intersection(bvh->children[0].bvh, ray, closest_object, closest_normal, closest_distance); } else if (intersect_r) { bvh_get_closest_intersection(bvh->children[1].bvh, ray, closest_object, closest_normal, closest_distance); } } bool bvh_is_light_blocked(const BVH *bvh, const Line *ray, const float distance, Vec3 light_intensity, const Object *emittant_object) { float tmin, tmax; if (bvh->is_leaf) { Vec3 normal; Object *object = bvh->children[0].object; if 
(object == emittant_object) return false; if (object->object_data->get_intersection(object, ray, &tmin, normal) && tmin < distance) { if (object->material->transparent) mul3v(light_intensity, object->material->kt, light_intensity); else return true; } return false; } size_t i; #pragma GCC unroll 2 for (i = 0; i < 2; i++) { if (bounding_cuboid_intersects(bvh->children[i].bvh->bounding_cuboid, ray, &tmax, &tmin) && tmin < distance && bvh_is_light_blocked(bvh->children[i].bvh, ray, distance, light_intensity, emittant_object)) return true; } return false; } #ifdef DEBUG void bvh_print(const BVH *bvh, const uint32_t depth) { uint32_t i; size_t j; for (i = 0; i < depth; i++) printf("\t"); if (bvh->is_leaf) { printf("%s\n", bvh->children[0].object->object_data->name); } else { printf("NODE\n"); for (j = 0; j < 2; j++) bvh_print(bvh->children[j].bvh, depth + 1); } } #endif /******************************************************************************* * Material *******************************************************************************/ void material_init(Material *material, const int32_t id, const Vec3 ks, const Vec3 ka, const Vec3 kr, const Vec3 kt, const Vec3 ke, const float shininess, const float refractive_index, Texture * const texture) { material->id = id; memcpy(material->ks, ks, sizeof(Vec3)); memcpy(material->ka, ka, sizeof(Vec3)); memcpy(material->kr, kr, sizeof(Vec3)); memcpy(material->kt, kt, sizeof(Vec3)); memcpy(material->ke, ke, sizeof(Vec3)); material->shininess = shininess; material->refractive_index = refractive_index; material->texture = texture; material->emittant = mag3(ke) > MATERIAL_THRESHOLD; material->reflective = mag3(kr) > MATERIAL_THRESHOLD; material->transparent = mag3(kt) > MATERIAL_THRESHOLD; } void material_load(const cJSON *json, Material *material) { cJSON *json_id = cJSON_GetObjectItemCaseSensitive(json, "id"), *json_ks = cJSON_GetObjectItemCaseSensitive(json, "ks"), *json_ka = cJSON_GetObjectItemCaseSensitive(json, "ka"), *json_kr 
= cJSON_GetObjectItemCaseSensitive(json, "kr"), *json_kt = cJSON_GetObjectItemCaseSensitive(json, "kt"), *json_ke = cJSON_GetObjectItemCaseSensitive(json, "ke"), *json_shininess = cJSON_GetObjectItemCaseSensitive(json, "shininess"), *json_refractive_index = cJSON_GetObjectItemCaseSensitive(json, "refractive_index"), *json_texture = cJSON_GetObjectItemCaseSensitive(json, "texture"); err_assert(cJSON_IsNumber(json_shininess) && cJSON_IsNumber(json_refractive_index) && cJSON_IsNumber(json_id), ERR_JSON_VALUE_NOT_NUMBER); err_assert(cJSON_IsObject(json_texture), ERR_JSON_VALUE_NOT_OBJECT); err_assert(cJSON_IsArray(json_ks) && cJSON_IsArray(json_ka) && cJSON_IsArray(json_kr) && cJSON_IsArray(json_kt) && cJSON_IsArray(json_ke), ERR_JSON_VALUE_NOT_ARRAY); err_assert(cJSON_GetArraySize(json_ks) == 3 && cJSON_GetArraySize(json_ka) == 3 && cJSON_GetArraySize(json_kr) == 3 && cJSON_GetArraySize(json_kt) == 3 && cJSON_GetArraySize(json_ke) == 3, ERR_JSON_ARRAY_SIZE); Vec3 ks, ka, kr, kt, ke; cJSON_parse_float_array(json_ks, ks); cJSON_parse_float_array(json_ka, ka); cJSON_parse_float_array(json_kr, kr); cJSON_parse_float_array(json_kt, kt); cJSON_parse_float_array(json_ke, ke); int32_t id = json_id->valueint; float shininess = json_shininess->valuedouble; float refractive_index = json_refractive_index->valuedouble; Texture *texture = texture_load(json_texture); material_init(material, id, ks, ka, kr, kt, ke, shininess, refractive_index, texture); } Material *get_material(const Context *context, const int32_t id) { size_t i; for (i = 0; i < context->num_materials; i++) if (context->materials[i].id == id) return &context->materials[i]; err(ERR_JSON_INVALID_MATERIAL); return NULL; } Texture *texture_load(const cJSON *json) { cJSON *json_type = cJSON_GetObjectItemCaseSensitive(json, "type"); err_assert(cJSON_IsString(json_type), ERR_JSON_VALUE_NOT_STRING); switch (djb_hash(json_type->valuestring)) { case 3226203393: { //uniform cJSON *json_color = 
cJSON_GetObjectItemCaseSensitive(json, "color"); err_assert(cJSON_IsArray(json_color), ERR_JSON_VALUE_NOT_ARRAY); err_assert(cJSON_GetArraySize(json_color) == 3, ERR_JSON_ARRAY_SIZE); TextureUniform *texture = malloc(sizeof(TextureUniform)); texture->texture.get_color = &texture_get_color_uniform; cJSON_parse_float_array(json_color, texture->color); return (Texture*)texture; } case 2234799246: { //checkerboard cJSON *json_colors = cJSON_GetObjectItemCaseSensitive(json, "colors"), *json_scale = cJSON_GetObjectItemCaseSensitive(json, "scale"); err_assert(cJSON_IsArray(json_colors), ERR_JSON_VALUE_NOT_ARRAY); err_assert(cJSON_GetArraySize(json_colors) == 2, ERR_JSON_ARRAY_SIZE); err_assert(cJSON_IsNumber(json_scale), ERR_JSON_VALUE_NOT_NUMBER); TextureCheckerboard *texture = malloc(sizeof(TextureCheckerboard)); texture->texture.get_color = &texture_get_color_checkerboard; texture->scale = json_scale->valuedouble; cJSON *json_iter; size_t i = 0; cJSON_ArrayForEach (json_iter, json_colors) { err_assert(cJSON_IsArray(json_iter), ERR_JSON_VALUE_NOT_ARRAY); err_assert(cJSON_GetArraySize(json_iter) == 3, ERR_JSON_ARRAY_SIZE); cJSON_parse_float_array(json_iter, texture->colors[i++]); } return (Texture*)texture; } case 176032948: { //brick cJSON *json_colors = cJSON_GetObjectItemCaseSensitive(json, "colors"), *json_scale = cJSON_GetObjectItemCaseSensitive(json, "scale"), *json_mortar_width = cJSON_GetObjectItemCaseSensitive(json, "mortar width"); err_assert(cJSON_IsArray(json_colors), ERR_JSON_VALUE_NOT_ARRAY); err_assert(cJSON_GetArraySize(json_colors) == 2, ERR_JSON_ARRAY_SIZE); err_assert(cJSON_IsNumber(json_scale) && cJSON_IsNumber(json_mortar_width), ERR_JSON_VALUE_NOT_NUMBER); TextureBrick *texture = malloc(sizeof(TextureBrick)); texture->texture.get_color = &texture_get_color_brick; texture->scale = json_scale->valuedouble; texture->mortar_width = json_mortar_width->valuedouble; cJSON *json_iter; size_t i = 0; cJSON_ArrayForEach (json_iter, json_colors) { 
err_assert(cJSON_IsArray(json_iter), ERR_JSON_VALUE_NOT_ARRAY); err_assert(cJSON_GetArraySize(json_iter) == 3, ERR_JSON_ARRAY_SIZE); cJSON_parse_float_array(json_iter, texture->colors[i++]); } return (Texture*)texture; } case 202158024: { //noisy periodic cJSON *json_color = cJSON_GetObjectItemCaseSensitive(json, "color"), *json_color_gradient = cJSON_GetObjectItemCaseSensitive(json, "color gradient"), *json_noise_feature_scale = cJSON_GetObjectItemCaseSensitive(json, "noise feature scale"), *json_noise_scale = cJSON_GetObjectItemCaseSensitive(json, "noise scale"), *json_frequency_scale = cJSON_GetObjectItemCaseSensitive(json, "frequency scale"), *json_function = cJSON_GetObjectItemCaseSensitive(json, "function"); err_assert(cJSON_IsArray(json_color) && cJSON_IsArray(json_color_gradient), ERR_JSON_VALUE_NOT_ARRAY); err_assert(cJSON_GetArraySize(json_color) == 3 && cJSON_GetArraySize(json_color_gradient) == 3, ERR_JSON_ARRAY_SIZE); err_assert(cJSON_IsNumber(json_noise_feature_scale) && cJSON_IsNumber(json_noise_scale) && cJSON_IsNumber(json_frequency_scale), ERR_JSON_VALUE_NOT_NUMBER); err_assert(cJSON_IsString(json_function), ERR_JSON_VALUE_NOT_STRING); TextureNoisyPeriodic *texture = malloc(sizeof(TextureNoisyPeriodic)); texture->texture.get_color = &texture_get_color_noisy_periodic; texture->noise_feature_scale = json_noise_feature_scale->valuedouble; texture->noise_scale = json_noise_scale->valuedouble; texture->frequency_scale = json_frequency_scale->valuedouble; cJSON_parse_float_array(json_color, texture->color); cJSON_parse_float_array(json_color_gradient, texture->color_gradient); switch (djb_hash(json_function->valuestring)) { case 193433777: //sin texture->func = PERIODIC_FUNC_SIN; break; case 193433504: //saw texture->func = PERIODIC_FUNC_SAW; break; case 837065195: //triangle texture->func = PERIODIC_FUNC_TRIANGLE; break; case 2144888260: //square texture->func = PERIODIC_FUNC_SQUARE; break; default: err(ERR_JSON_UNRECOGNIZED); } return 
(Texture*)texture; } default: err(ERR_JSON_UNRECOGNIZED); } return NULL; } void texture_get_color_uniform(const Texture *tex, const Vec3 point, Vec3 color) { (void)point; TextureUniform *texture = (TextureUniform*)tex; memcpy(color, texture->color, sizeof(Vec3)); } void texture_get_color_checkerboard(const Texture *tex, const Vec3 point, Vec3 color) { TextureCheckerboard *texture = (TextureCheckerboard*)tex; Vec3 scaled_point; mul3s(point, texture->scale, scaled_point); uint32_t parity = ((uint32_t)scaled_point[X] + (uint32_t)scaled_point[Y] + (uint32_t)scaled_point[Z]) % 2u; memcpy(color, texture->colors[parity], sizeof(Vec3)); } void texture_get_color_brick(const Texture *tex, const Vec3 point, Vec3 color) { TextureBrick *texture = (TextureBrick*)tex; Vec3 scaled_point; mul3s(point, texture->scale, scaled_point); uint32_t parity = (uint32_t)scaled_point[X] % 2u; scaled_point[Y] -= parity * .5f; uint32_t is_mortar = (scaled_point[X] - floorf(scaled_point[X]) < texture->mortar_width) || (scaled_point[Y] - floorf(scaled_point[Y]) < texture->mortar_width); memcpy(color, texture->colors[is_mortar], sizeof(Vec3)); } void texture_get_color_noisy_periodic(const Texture *tex, const Vec3 point, Vec3 color) { TextureNoisyPeriodic *texture = (TextureNoisyPeriodic*)tex; Vec3 scaled_point; mul3s(point, texture->noise_feature_scale, scaled_point); float angle = (point[X] + simplex_noise(scaled_point[X], scaled_point[Y], scaled_point[Z]) * texture->noise_scale) * texture->frequency_scale; switch (texture->func) { case PERIODIC_FUNC_SIN: mul3s(texture->color_gradient, (1.f + sinf(angle)) * .5f, color); break; case PERIODIC_FUNC_SAW: mul3s(texture->color_gradient, angle - floorf(angle), color); break; case PERIODIC_FUNC_TRIANGLE: mul3s(texture->color_gradient, fabs(2.f * (angle - floorf(angle) - .5f)), color); break; case PERIODIC_FUNC_SQUARE: mul3s(texture->color_gradient, !signbit(sinf(angle)), color); break; } add3v(color, texture->color, color); } 
/******************************************************************************* * Object *******************************************************************************/ void object_add(Object * const object, Context *context) { context->objects[context->num_objects++] = object; } void object_load(const cJSON *json, const Context *context, Object *object, const ObjectType object_type) { cJSON *json_epsilon = cJSON_GetObjectItemCaseSensitive(json, "epsilon"), *json_material = cJSON_GetObjectItemCaseSensitive(json, "material"), *json_num_lights = cJSON_GetObjectItemCaseSensitive(json, "lights"); err_assert(cJSON_IsNumber(json_material), ERR_JSON_VALUE_NOT_NUMBER); object->object_data = &OBJECT_DATA[object_type]; object->epsilon = cJSON_IsNumber(json_epsilon) ? (float)json_epsilon->valuedouble : -1.f; object->material = get_material(context, json_material->valueint); object->num_lights = cJSON_IsNumber(json_num_lights) ? json_num_lights->valueint : 0; } #ifdef UNBOUND_OBJECTS void unbound_objects_get_closest_intersection(const Context *context, const Line *ray, Object **closest_object, Vec3 closest_normal, float *closest_distance) { float distance; Vec3 normal; size_t i; for (i = 0; i < context->num_unbound_objects; i++) { Object *object = context->unbound_objects[i]; if (object->object_data->get_intersection(object, ray, &distance, normal) && distance < *closest_distance) { *closest_distance = distance; *closest_object = object; memcpy(closest_normal, normal, sizeof(Vec3)); } } } bool unbound_objects_is_light_blocked(const Context *context, const Line *ray, const float distance, Vec3 light_intensity, const Object *emittant_object) { (void)emittant_object; //NOTE: is unused because planes cant be lights size_t i; for (i = 0; i < context->num_unbound_objects; i++) { Object *object = context->unbound_objects[i]; if (object->object_data->intersects_in_range(context->unbound_objects[i], ray, distance)) { if (object->material->transparent) mul3v(light_intensity, 
object->material->kt, light_intensity); else return true; } } return false; } #endif /* UNBOUND_OBJECTS */ /******************************************************************************* * Sphere *******************************************************************************/ Sphere *sphere_load(const cJSON *json, Context *context) { cJSON *json_position = cJSON_GetObjectItemCaseSensitive(json, "position"), *json_radius = cJSON_GetObjectItemCaseSensitive(json, "radius"); err_assert(cJSON_IsNumber(json_radius), ERR_JSON_VALUE_NOT_NUMBER); err_assert(cJSON_IsArray(json_position), ERR_JSON_VALUE_NOT_ARRAY); err_assert(cJSON_GetArraySize(json_position) == 3, ERR_JSON_ARRAY_SIZE); Sphere *sphere = malloc(sizeof(Sphere)); object_load(json, context, (Object*)sphere, OBJECT_SPHERE); cJSON_parse_float_array(json_position, sphere->position); sphere->radius = json_radius->valuedouble; if (sphere->object.epsilon == -1.f) sphere->object.epsilon = sphere->radius * 0.0003f; return sphere; } void sphere_delete(Object *object) { free(object); } bool sphere_get_intersection(const Object *object, const Line *ray, float *distance, Vec3 normal) { Sphere *sphere = (Sphere*)object; if (line_intersects_sphere(sphere->position, sphere->radius, ray->position, ray->vector, sphere->object.epsilon, distance)) { mul3s(ray->vector, *distance, normal); add3v(normal, ray->position, normal); sub3v(normal, sphere->position, normal); mul3s(normal, 1.f / sphere->radius, normal); return true; } return false; } bool sphere_intersects_in_range(const Object *object, const Line *ray, const float min_distance) { Sphere *sphere = (Sphere*)object; float distance; bool intersects = line_intersects_sphere(sphere->position, sphere->radius, ray->position, ray->vector, sphere->object.epsilon, &distance); if (intersects && distance < min_distance) return true; return false; } BoundingCuboid *sphere_generate_bounding_cuboid(const Object *object) { Sphere *sphere = (Sphere*)object; Vec3 corners[2]; 
sphere_get_corners(object, corners); return bounding_cuboid_new(sphere->object.epsilon, corners); } void sphere_get_corners(const Object *object, Vec3 corners[2]) { Sphere *sphere = (Sphere*)object; sub3s(sphere->position, sphere->radius, corners[0]); add3s(sphere->position, sphere->radius, corners[1]); } void sphere_scale(const Object *object, const Vec3 neg_shift, const float scale) { Sphere *sphere = (Sphere*)object; sphere->object.epsilon *= scale; sphere->radius *= scale; sub3v(sphere->position, neg_shift, sphere->position); mul3s(sphere->position, scale, sphere->position); } void sphere_get_light_point(const Object *object, const Vec3 point, Vec3 light_point) { Sphere *sphere = (Sphere*)object; Vec3 normal; sub3v(sphere->position, point, normal); float inclination = rand_flt() * 2.f * PI; float azimuth = rand_flt() * 2.f * PI; Vec3 light_direction = SPHERICAL_TO_CARTESIAN(sphere->radius, inclination, azimuth); if (dot3(normal, light_direction)) mul3s(light_direction, -1.f, light_direction); add3v(sphere->position, light_direction, light_point); } /******************************************************************************* * Triangle *******************************************************************************/ void triangle_init(Triangle *triangle) { sub3v(triangle->vertices[1], triangle->vertices[0], triangle->edges[0]); sub3v(triangle->vertices[2], triangle->vertices[0], triangle->edges[1]); cross(triangle->edges[0], triangle->edges[1], triangle->normal); norm3(triangle->normal); if (triangle->object.epsilon == -1.f) { float magab = mag3(triangle->edges[0]) * mag3(triangle->edges[1]); triangle->object.epsilon = 0.003f * powf(0.5f * magab * sinf(acosf(dot3(triangle->edges[0], triangle->edges[1]) / magab)), 0.75f); }//TODO: improve this } void triangle_delete(Object *object) { free(object); } Triangle *triangle_load(const cJSON *json, Context *context) { cJSON *json_vertex_1 = cJSON_GetObjectItemCaseSensitive(json, "vertex_1"), *json_vertex_2 = 
		/* continuation of triangle_load begun in the previous chunk */
		cJSON_GetObjectItemCaseSensitive(json, "vertex_2"),
		*json_vertex_3 = cJSON_GetObjectItemCaseSensitive(json, "vertex_3");
	err_assert(cJSON_IsArray(json_vertex_1) && cJSON_IsArray(json_vertex_2)
		&& cJSON_IsArray(json_vertex_3), ERR_JSON_VALUE_NOT_ARRAY);
	err_assert(cJSON_GetArraySize(json_vertex_1) == 3
		&& cJSON_GetArraySize(json_vertex_2) == 3
		&& cJSON_GetArraySize(json_vertex_3) == 3, ERR_JSON_ARRAY_SIZE);
	Triangle *triangle = malloc(sizeof(Triangle));
	object_load(json, context, (Object*)triangle, OBJECT_TRIANGLE);
	cJSON_parse_float_array(json_vertex_1, triangle->vertices[0]);
	cJSON_parse_float_array(json_vertex_2, triangle->vertices[1]);
	cJSON_parse_float_array(json_vertex_3, triangle->vertices[2]);
	triangle_init(triangle); /* derive edges, normal, default epsilon */
	return triangle;
}

/* Ray/triangle intersection via Moller-Trumbore; on a hit the triangle's
 * precomputed (flat) normal is returned. */
bool triangle_get_intersection(const Object *object, const Line *ray, float *distance, Vec3 normal)
{
	Triangle *triangle = (Triangle*)object;
	bool intersects = moller_trumbore(triangle->vertices[0], triangle->edges,
		ray->position, ray->vector, triangle->object.epsilon, distance);
	if (intersects) {
		memcpy(normal, triangle->normal, sizeof(Vec3));
		return true;
	}
	return false;
}

/* Shadow/occlusion test: hit strictly closer than min_distance. */
bool triangle_intersects_in_range(const Object *object, const Line *ray, float min_distance)
{
	Triangle *triangle = (Triangle*)object;
	float distance;
	bool intersects = moller_trumbore(triangle->vertices[0], triangle->edges,
		ray->position, ray->vector, triangle->object.epsilon, &distance);
	return intersects && distance < min_distance;
}

/* Build an axis-aligned bounding cuboid around the triangle. */
BoundingCuboid *triangle_generate_bounding_cuboid(const Object *object)
{
	Triangle *triangle = (Triangle*)object;
	Vec3 corners[2];
	triangle_get_corners(object, corners);
	return bounding_cuboid_new(triangle->object.epsilon, corners);
}

/* Component-wise min/max of the three vertices.  Both corners start at
 * vertices[2]; the loop then folds in vertices[0] and vertices[1]. */
void triangle_get_corners(const Object *object, Vec3 corners[2])
{
	Triangle *triangle = (Triangle*)object;
	memcpy(corners[0], triangle->vertices[2], sizeof(Vec3));
	memcpy(corners[1], triangle->vertices[2], sizeof(Vec3));
	size_t i, j;
	for (i = 0; i < 2; i++)
		for (j = 0; j < 3; j++) {
			if (corners[0][j] > triangle->vertices[i][j])
				corners[0][j] = triangle->vertices[i][j];
			else if (corners[1][j] < triangle->vertices[i][j])
				corners[1][j] = triangle->vertices[i][j];
		}
}

/* Translate by -neg_shift and scale vertices, edges and epsilon uniformly.
 * The precomputed unit normal is unaffected by a uniform scale. */
void triangle_scale(const Object *object, const Vec3 neg_shift, const float scale)
{
	Triangle *triangle = (Triangle*)object;
	triangle->object.epsilon *= scale;
	size_t i;
	for (i = 0; i < 3; i++) {
		sub3v(triangle->vertices[i], neg_shift, triangle->vertices[i]);
		mul3s(triangle->vertices[i], scale, triangle->vertices[i]);
	}
	for (i = 0; i < 2; i++)
		mul3s(triangle->edges[i], scale, triangle->edges[i]);
}

/* Uniformly sample a point on the triangle: draw (p, q) in the unit
 * square and fold it into the lower-left half so the barycentric sample
 * v0 + p*(v1-v0) + q*(v2-v0) stays inside the triangle. */
void triangle_get_light_point(const Object *object, const Vec3 point, Vec3 light_point)
{
	//NOTE: this method may be inefficient due to the 3 square root operations, but it is unlikely to be used often
	(void)point;
	Triangle *triangle = (Triangle*)object;
	float p = rand_flt(), q = rand_flt();
	if (p + q > 1.f) { /* fold into the triangle */
		p = 1.f - p;
		q = 1.f - q;
	}
	size_t i;
	#pragma GCC unroll 3
	for (i = 0; i < 3; i++)
		light_point[i] = triangle->vertices[0][i]
			+ (triangle->vertices[1][i] - triangle->vertices[0][i]) * p
			+ (triangle->vertices[2][i] - triangle->vertices[0][i]) * q;
}

/*******************************************************************************
* Plane
*******************************************************************************/
#ifdef UNBOUND_OBJECTS

/* Parse an (unbounded) plane from JSON: a "normal" and a "position" on
 * the plane; stores the plane as n.x = d.  Planes may not be emittant.
 * (continues in next chunk) */
Plane *plane_load(const cJSON *json, Context *context)
{
	cJSON *json_position = cJSON_GetObjectItemCaseSensitive(json, "position"),
		*json_normal = cJSON_GetObjectItemCaseSensitive(json, "normal");
	err_assert(cJSON_IsArray(json_position) && cJSON_IsArray(json_normal),
		ERR_JSON_VALUE_NOT_ARRAY);
	err_assert(cJSON_GetArraySize(json_position) == 3
		&& cJSON_GetArraySize(json_normal) == 3, ERR_JSON_ARRAY_SIZE);
	Plane *plane = malloc(sizeof(Plane));
	object_load(json, context, (Object*)plane, OBJECT_PLANE);
	err_assert(!plane->object.material->emittant, ERR_JSON_EMITTANT);
	cJSON_parse_float_array(json_normal, plane->normal);
	norm3(plane->normal);
	Vec3 position;
cJSON_parse_float_array(json_position, position); plane->d = dot3(plane->normal, position); return plane; } void plane_delete(Object *object) { free(object); } bool plane_get_intersection(const Object *object, const Line *ray, float *distance, Vec3 normal) { Plane *plane = (Plane*)object; float a = dot3(plane->normal, ray->vector); if (fabsf(a) < plane->object.epsilon) //ray is parallel to line return false; *distance = (plane->d - dot3(plane->normal, ray->position)) / dot3(plane->normal, ray->vector); if (*distance > plane->object.epsilon) { if (signbit(a)) memcpy(normal, plane->normal, sizeof(Vec3)); else mul3s(plane->normal, -1.f, normal); return true; } return false; } bool plane_intersects_in_range(const Object *object, const Line *ray, float min_distance) { Plane *plane = (Plane*)object; float a = dot3(plane->normal, ray->vector); if (fabsf(a) < plane->object.epsilon) //ray is parallel to line return false; float distance = (plane->d - dot3(plane->normal, ray->position)) / dot3(plane->normal, ray->vector); return distance > plane->object.epsilon && distance < min_distance; } void plane_scale(const Object *object, const Vec3 neg_shift, const float scale) { Plane *plane = (Plane*)object; Vec3 point = {1.f, 1.f, 1.f}; size_t i; for (i = 0; i < 3; i++) if (fabsf(plane->normal[i]) > plane->object.epsilon) break; point[i] = 0.f; point[i] = (plane->d - dot3(point, plane->normal)) / plane->normal[i]; sub3v(point, neg_shift, point); mul3s(point, scale, point); plane->d = dot3(plane->normal, point); plane->object.epsilon *= scale; } #endif /* UNBOUND_OBJECTS */ /******************************************************************************* * Mesh *******************************************************************************/ void mesh_load(const cJSON *json, Context *context) { cJSON *json_filename = cJSON_GetObjectItemCaseSensitive(json, "filename"), *json_position = cJSON_GetObjectItemCaseSensitive(json, "position"), *json_rotation = 
		/* continuation of mesh_load begun in the previous chunk */
		cJSON_GetObjectItemCaseSensitive(json, "rotation"),
		*json_scale = cJSON_GetObjectItemCaseSensitive(json, "scale");
	err_assert(cJSON_IsString(json_filename), ERR_JSON_VALUE_NOT_STRING);
	err_assert(cJSON_IsNumber(json_scale), ERR_JSON_VALUE_NOT_NUMBER);
	err_assert(cJSON_IsArray(json_position) && cJSON_IsArray(json_rotation),
		ERR_JSON_VALUE_NOT_ARRAY);
	err_assert(cJSON_GetArraySize(json_position) == 3
		&& cJSON_GetArraySize(json_rotation) == 3, ERR_JSON_ARRAY_SIZE);
	float scale = json_scale->valuedouble;
	Vec3 position, rotation;
	cJSON_parse_float_array(json_position, position);
	cJSON_parse_float_array(json_rotation, rotation);
	FILE *file = fopen(json_filename->valuestring, "rb");
	err_assert(file, ERR_JSON_IO_OPEN);
	/* a template Object whose properties are copied into every triangle */
	Object *object = malloc(sizeof(Object));
	object_load(json, context, object, OBJECT_TRIANGLE);
	err_assert(!object->material->emittant, ERR_JSON_EMITTANT);
	stl_load_objects(context, file, object, position, rotation, scale);
	fclose(file);
}

/* Read the 32-bit facet count that follows the 80-byte binary STL header. */
uint32_t stl_get_num_triangles(FILE *file)
{
	err_assert(fseek(file, sizeof(uint8_t[80]), SEEK_SET) == 0, ERR_STL_IO_FP);
	uint32_t num_triangles;
	err_assert(fread(&num_triangles, sizeof(uint32_t), 1, file) == 1, ERR_STL_IO_READ);
	return num_triangles;
}

//assumes that file is at SEEK_SET
/* Read every facet of a binary STL file, apply the XYZ Euler rotation,
 * uniform scale and translation, and register each facet as a Triangle
 * cloned from the template `object`. */
void stl_load_objects(Context *context, FILE *file, const Object *object,
	const Vec3 position, const Vec3 rot, const float scale)
{
	//ensure that file is binary instead of ascii
	char header[5];
	err_assert(fread(header, sizeof(char), 5, file) == 5, ERR_STL_IO_READ);
	/* ASCII STL starts with "solid"; strncmp != 0 means binary, as required */
	err_assert(strncmp("solid", header, 5), ERR_STL_ENCODING);
	/* rotation matrix R = Rz(rot[Z]) * Ry(rot[Y]) * Rx(rot[X]) */
	float a = cosf(rot[Z]) * sinf(rot[Y]);
	float b = sinf(rot[Z]) * sinf(rot[Y]);
	Mat3 rotation_matrix = {
		{ cosf(rot[Z]) * cosf(rot[Y]),
		  a * sinf(rot[X]) - sinf(rot[Z]) * cosf(rot[X]),
		  a * cosf(rot[X]) + sinf(rot[Z]) * sinf(rot[X]) },
		{ sinf(rot[Z]) * cosf(rot[Y]),
		  b * sinf(rot[X]) + cosf(rot[Z]) * cosf(rot[X]),
		  b * cosf(rot[X]) - cosf(rot[Z]) * sinf(rot[X]) },
		{ -sinf(rot[Y]),
		  cosf(rot[Y]) * sinf(rot[X]),
		  cosf(rot[Y]) * cosf(rot[X]) } };
	uint32_t num_triangles = stl_get_num_triangles(file);
	/* the mesh's own slot was already counted, hence num_triangles - 1 extra.
	 * NOTE(review): scene_load allocates context->objects with
	 * sizeof(Object*) per element, but this realloc uses sizeof(Object[...])
	 * — the element sizes disagree; confirm which one is intended. */
	context->objects_size += num_triangles - 1;
	context->objects = realloc(context->objects, sizeof(Object[context->objects_size]));
	err_assert(context->objects, ERR_MALLOC);
	uint32_t i;
	for (i = 0; i < num_triangles; i++) {
		STLTriangle stl_triangle;
		err_assert(fread(&stl_triangle, sizeof(STLTriangle), 1, file) == 1, ERR_STL_IO_READ);
		uint32_t j;
		for (j = 0; j < 3; j++) {
			/* vertex' = R * vertex * scale + position */
			Vec3 temp_vertices;
			mulm3(rotation_matrix, stl_triangle.vertices[j], temp_vertices);
			mul3s(temp_vertices, scale, stl_triangle.vertices[j]);
			add3v(stl_triangle.vertices[j], position, stl_triangle.vertices[j]);
		}
		Triangle *triangle = malloc(sizeof(Triangle));
		memcpy(triangle, object, sizeof(Object)); /* clone the template's base Object */
		memcpy(triangle->vertices, stl_triangle.vertices, sizeof(Vec3[3]));
		triangle_init(triangle);
		object_add((Object*)triangle, context);
	}
}

/*******************************************************************************
* BoundingCuboid
*******************************************************************************/

/* Allocate a bounding cuboid from its two opposite corners. */
BoundingCuboid *bounding_cuboid_new(const float epsilon, Vec3 corners[2])
{
	BoundingCuboid *bounding_cuboid = malloc(sizeof(BoundingCuboid));
	bounding_cuboid->epsilon = epsilon;
	memcpy(bounding_cuboid->corners, corners, sizeof(Vec3[2]));
	return bounding_cuboid;
}

/* Release a cuboid allocated by bounding_cuboid_new. */
void bounding_cuboid_delete(BoundingCuboid *bounding_cuboid)
{
	free(bounding_cuboid);
}

/* Slab-method ray/AABB test; writes the entry/exit ray parameters to
 * *tmin / *tmax.  (continues in next chunk) */
bool bounding_cuboid_intersects(const BoundingCuboid *cuboid, const Line *ray, float *tmax, float *tmin)
{
	float tymin, tymax;
	float divx = 1 / ray->vector[X];
	if (divx >= 0) {
		*tmin = (cuboid->corners[0][X] - ray->position[X]) * divx;
		*tmax = (cuboid->corners[1][X] - ray->position[X]) * divx;
	} else {
		*tmin = (cuboid->corners[1][X] - ray->position[X]) * divx;
		*tmax = (cuboid->corners[0][X] - ray->position[X]) * divx;
	}
	float divy = 1 / ray->vector[Y];
	if (divy >= 0) {
		tymin = (cuboid->corners[0][Y] - ray->position[Y]) * divy;
		tymax = (cuboid->corners[1][Y] - ray->position[Y]) *
		/* continuation of bounding_cuboid_intersects begun in the previous chunk */
		divy;
	} else {
		tymin = (cuboid->corners[1][Y] - ray->position[Y]) * divy;
		tymax = (cuboid->corners[0][Y] - ray->position[Y]) * divy;
	}
	if ((*tmin > tymax) || (tymin > *tmax))
		return false;
	if (tymin > *tmin) *tmin = tymin;
	if (tymax < *tmax) *tmax = tymax;
	float tzmin, tzmax;
	float divz = 1 / ray->vector[Z];
	if (divz >= 0) {
		tzmin = (cuboid->corners[0][Z] - ray->position[Z]) * divz;
		tzmax = (cuboid->corners[1][Z] - ray->position[Z]) * divz;
	} else {
		tzmin = (cuboid->corners[1][Z] - ray->position[Z]) * divz;
		tzmax = (cuboid->corners[0][Z] - ray->position[Z]) * divz;
	}
	if (*tmin > tzmax || tzmin > *tmax)
		return false;
	if (tzmin > *tmin) *tmin = tzmin;
	if (tzmax < *tmax) *tmax = tzmax;
	/* only count hits in front of the ray origin (beyond epsilon) */
	return *tmax > cuboid->epsilon;
}

/*******************************************************************************
* JSON
*******************************************************************************/

/* Copy a JSON array of numbers into `array`.  The caller guarantees the
 * array length matches the destination. */
void cJSON_parse_float_array(const cJSON *json, float *array)
{
	size_t i = 0;
	cJSON *json_iter;
	cJSON_ArrayForEach (json_iter, json) {
		err_assert(cJSON_IsNumber(json_iter), ERR_JSON_VALUE_NOT_NUMBER);
		array[i++] = json_iter->valuedouble;
	}
}

/* Read and parse the whole scene file: camera, materials, then objects.
 * Objects are parsed in two passes — the first counts emittant (and,
 * with UNBOUND_OBJECTS, unbound) objects so the arrays can be allocated,
 * the second constructs each object by hashed type name. */
void scene_load(Context *context)
{
	/* slurp the entire file into a NUL-terminated buffer */
	fseek(context->scene_file, 0, SEEK_END);
	size_t length = ftell(context->scene_file);
	fseek(context->scene_file, 0, SEEK_SET);
	char *buffer = malloc(length + 1);
	err_assert(buffer, ERR_MALLOC);
	err_assert(fread(buffer, 1, length, context->scene_file) == length, ERR_JSON_IO_READ);
	buffer[length] = '\0';
	cJSON *json = cJSON_Parse(buffer);
	free(buffer);
	err_assert(json, ERR_JSON_IO_READ);
	err_assert(cJSON_IsObject(json), ERR_JSON_FIRST_TOKEN);
	cJSON *json_materials = cJSON_GetObjectItemCaseSensitive(json, "Materials"),
		*json_objects = cJSON_GetObjectItemCaseSensitive(json, "Objects"),
		*json_camera = cJSON_GetObjectItemCaseSensitive(json, "Camera"),
		*json_ambient_light = cJSON_GetObjectItemCaseSensitive(json, "AmbientLight");
	err_assert(cJSON_IsObject(json_camera), ERR_JSON_NO_CAMERA);
	err_assert(cJSON_IsArray(json_objects) && cJSON_IsArray(json_materials),
		ERR_JSON_VALUE_NOT_ARRAY);
	context->camera = camera_load(json_camera, context);
	int num_objects = context->objects_size = cJSON_GetArraySize(json_objects),
		num_materials = cJSON_GetArraySize(json_materials);
	err_assert(num_objects > 0, ERR_JSON_NO_OBJECTS);
	err_assert(num_materials > 0, ERR_JSON_NO_MATERIALS);
	context->objects = malloc(sizeof(Object*) * num_objects);
	context->materials = malloc(sizeof(Material[num_materials]));
	err_assert(context->objects && context->materials, ERR_MALLOC);
	/* AmbientLight is optional; only parsed when well-formed */
	if (cJSON_IsArray(json_ambient_light) && cJSON_GetArraySize(json_ambient_light) == 3)
		cJSON_parse_float_array(json_ambient_light, context->global_ambient_light_intensity);
	cJSON *json_iter;
	cJSON_ArrayForEach (json_iter, json_materials) {
		err_assert(cJSON_IsObject(json_iter), ERR_JSON_VALUE_NOT_OBJECT);
		material_load(json_iter, &context->materials[context->num_materials++]);
	}
	/* pass 1: count emittant (and unbound) objects */
#ifdef UNBOUND_OBJECTS
	size_t num_unbound_objects = 0;
#endif
	size_t num_emittant_objects = 0;
	cJSON_ArrayForEach (json_iter, json_objects) {
		err_assert(cJSON_IsObject(json_iter), ERR_JSON_VALUE_NOT_OBJECT);
		cJSON *json_type = cJSON_GetObjectItemCaseSensitive(json_iter, "type"),
			*json_parameters = cJSON_GetObjectItemCaseSensitive(json_iter, "parameters");
		err_assert(cJSON_IsObject(json_parameters), ERR_JSON_VALUE_NOT_OBJECT);
		err_assert(cJSON_IsString(json_type), ERR_JSON_VALUE_NOT_STRING);
		cJSON *json_material = cJSON_GetObjectItemCaseSensitive(json_parameters, "material");
		err_assert(cJSON_IsNumber(json_material), ERR_JSON_VALUE_NOT_NUMBER);
		Material *material = get_material(context, json_material->valueint);
		if (material->emittant)
			num_emittant_objects++;
#ifdef UNBOUND_OBJECTS
		if (djb_hash(json_type->valuestring) == 232719795) /* "Plane" */
			num_unbound_objects++;
#endif
	}
#ifdef UNBOUND_OBJECTS
	if (num_unbound_objects)
		context->unbound_objects = malloc(sizeof(Object*) * num_unbound_objects);
#endif
	err_assert(num_emittant_objects, ERR_JSON_NO_LIGHTS);
	context->emittant_objects = malloc(sizeof(Object*) * num_emittant_objects);
	/* pass 2: construct each object, dispatching on the djb-hashed type name.
	 * NOTE(review): there is no default case — an unrecognized type string
	 * falls out of the switch with `object` uninitialized (undefined
	 * behavior below); consider rejecting unknown types explicitly. */
	cJSON_ArrayForEach (json_iter, json_objects) {
		cJSON *json_type = cJSON_GetObjectItemCaseSensitive(json_iter, "type"),
			*json_parameters = cJSON_GetObjectItemCaseSensitive(json_iter, "parameters");
		Object *object;
		switch (djb_hash(json_type->valuestring)) {
		case 3324768284: /* Sphere */
			object = (Object*)sphere_load(json_parameters, context);
			break;
		case 103185867: /* Triangle */
			object = (Object*)triangle_load(json_parameters, context);
			break;
		case 232719795: /* Plane */
#ifdef UNBOUND_OBJECTS
			object = (Object*)plane_load(json_parameters, context);
			break;
#else
			continue;
#endif
		case 2088783990: /* Mesh */
			mesh_load(json_parameters, context); /* adds its own triangles */
			continue;
		}
#ifdef UNBOUND_OBJECTS
		if (!object->object_data->is_bounded)
			context->unbound_objects[context->num_unbound_objects++] = object;
#endif
		if (object->material->emittant)
			context->emittant_objects[context->num_emittant_objects++] = object;
		object_add(object, context);
	}
	cJSON_Delete(json);
}

/*******************************************************************************
* MISC
*******************************************************************************/

/* Print a diagnostic and terminate with the error code as exit status. */
void err(const ErrorCode error_code)
{
	printf(ERR_FORMAT_STRING, ERR_MESSAGES[error_code]);
	exit(error_code);
}

/* Find the nearest intersection across unbound objects (if enabled) and
 * the BVH of bounded objects. */
void get_closest_intersection(const Context *context, const Line *ray,
	Object **closest_object, Vec3 closest_normal, float *closest_distance)
{
#ifdef UNBOUND_OBJECTS
	unbound_objects_get_closest_intersection(context, ray, closest_object,
		closest_normal, closest_distance);
#endif
	bvh_get_closest_intersection(context->bvh, ray, closest_object,
		closest_normal, closest_distance);
}

/* True when anything occludes the ray before `distance`; transparent
 * occluders attenuate light_intensity instead.  (continues in next chunk) */
bool is_light_blocked(const Context *context, const Line *ray, const float distance,
	Vec3 light_intensity, const Object *emittant_object)
{
#ifdef UNBOUND_OBJECTS
	return unbound_objects_is_light_blocked(context, ray, distance, light_intensity, emittant_object) ||
bvh_is_light_blocked(context->bvh, ray, distance, light_intensity, emittant_object); #else return bvh_is_light_blocked(context->bvh, ray, distance, light_intensity, emittant_object); #endif } //Places all objects in scene in cube of side length 1 void normalize_scene(Context *context) { Vec3 min = {FLT_MAX}, max = {FLT_MIN}; size_t i, j; for (i = 0; i < context->num_objects; i++) { Object *object = context->objects[i]; #ifdef UNBOUND_OBJECTS if (object->object_data->is_bounded) { #endif Vec3 corners[2]; object->object_data->get_corners(object, corners); for (j = 0; j < 3; j++) { if (corners[0][j] < min[j]) min[j] = corners[0][j]; if (corners[1][j] > max[j]) max[j] = corners[1][j]; } #ifdef UNBOUND_OBJECTS } #endif } Vec3 range; sub3v(max, min, range); float scale_factor = 1.f / max3(range); for (i = 0; i < context->num_objects; i++) context->objects[i]->object_data->scale(context->objects[i], min, scale_factor); camera_scale(context->camera, min, scale_factor); } void cast_ray(const Context *context, const Line *ray, const Vec3 kr, Vec3 color, const uint32_t remaining_bounces, Object *inside_object) { Object *closest_object = NULL; Vec3 normal; float min_distance; Object *obj = inside_object; if (inside_object && inside_object->object_data->get_intersection(obj, ray, &min_distance, normal)) { closest_object = inside_object; } else { min_distance = FLT_MAX; get_closest_intersection(context, ray, &closest_object, normal, &min_distance); } Object *object = closest_object; if (! 
	/* continuation of cast_ray begun in the previous chunk */
	object) return; /* ray escaped the scene: contributes nothing */
	//LIGHTING MODEL
	Vec3 obj_color;
	Material *material = object->material;
	//emittance
	memcpy(obj_color, material->ke, sizeof(Vec3));
	//Line originating at point of intersection
	Line outgoing_ray;
	mul3s(ray->vector, min_distance, outgoing_ray.position);
	add3v(outgoing_ray.position, ray->position, outgoing_ray.position);
	float b = dot3(normal, ray->vector);
	bool is_outside = signbit(b); /* negative dot => hit on the front face */
	size_t i, j;
	/* direct lighting: sample every emittant object */
	for (i = 0; i < context->num_emittant_objects; i++) {
		Object *emittant_object = context->emittant_objects[i];
		if (emittant_object == object)
			continue; /* no self-illumination */
		Vec3 light_intensity;
		/* divide the emitter's power over its sample count */
		mul3s(emittant_object->material->ke, 1.f / emittant_object->num_lights, light_intensity);
		for (j = 0; j < emittant_object->num_lights; j++) {
			Vec3 light_point, incoming_light_intensity;
			emittant_object->object_data->get_light_point(emittant_object, outgoing_ray.position, light_point);
			memcpy(incoming_light_intensity, light_intensity, sizeof(Vec3));
			sub3v(light_point, outgoing_ray.position, outgoing_ray.vector);
			float light_distance = mag3(outgoing_ray.vector);
			mul3s(outgoing_ray.vector, 1.f / light_distance, outgoing_ray.vector);
			float a = dot3(outgoing_ray.vector, normal); /* cos of incidence angle */
			if (is_outside && !is_light_blocked(context, &outgoing_ray, light_distance, incoming_light_intensity, emittant_object)) {
				Vec3 distance;
				sub3v(light_point, outgoing_ray.position, distance);
				/* distance attenuation of the incoming light */
				switch (context->light_attenuation) {
				case LIGHT_ATTENUATION_NONE:
					break;
				case LIGHT_ATTENUATION_LINEAR:
					mul3s(incoming_light_intensity, 1.f / (context->light_attenuation_offset + mag3(distance)), incoming_light_intensity);
					break;
				case LIGHT_ATTENUATION_SQUARE:
					mul3s(incoming_light_intensity, 1.f / (context->light_attenuation_offset + magsqr3(distance)), incoming_light_intensity);
					break;
				}
				/* diffuse term: texture color * light * max(0, cos) */
				Vec3 diffuse;
				material->texture->get_color(material->texture, outgoing_ray.position, diffuse);
				mul3v(diffuse, incoming_light_intensity, diffuse);
				mul3s(diffuse, fmaxf(0., a), diffuse);
				/* specular term: Phong (mirror direction) or Blinn (half vector) */
				Vec3 reflected;
				float specular_mul;
				switch (context->reflection_model) {
				case REFLECTION_PHONG:
					mul3s(normal, 2 * a, reflected);
					sub3v(reflected, outgoing_ray.vector, reflected);
					specular_mul = - dot3(reflected, ray->vector);
					break;
				case REFLECTION_BLINN:
					mul3s(outgoing_ray.vector, -1.f, reflected);
					add3v(reflected, ray->vector, reflected);
					norm3(reflected);
					specular_mul = - dot3(normal, reflected);
					break;
				}
				Vec3 specular;
				mul3v(material->ks, incoming_light_intensity, specular);
				mul3s(specular, fmaxf(0., powf(specular_mul, material->shininess)), specular);
				add3v3(obj_color, diffuse, specular, obj_color);
			}
		}
	}
	//global illumination
	switch (context->global_illumination_model) {
	case GLOBAL_ILLUMINATION_AMBIENT: {
		Vec3 ambient_light;
		mul3v(material->ka, context->global_ambient_light_intensity, ambient_light);
		add3v(obj_color, ambient_light, obj_color);
	}
		break;
	case GLOBAL_ILLUMINATION_PATH_TRACING:
		if (remaining_bounces && is_outside) {
			/* build a rotation taking +Y onto the surface normal so
			 * hemisphere samples can be oriented around the normal */
			Mat3 rotation_matrix;
			if (normal[Y] - object->epsilon < -1.f) {
				/* normal ~ -Y: the general formula degenerates; use a fixed flip */
				Mat3 vx = { {1.f, 0.f, 0.f}, {0.f, -1.f, 0.f}, {0.f, 0.f, -1.f}, };
				memcpy(rotation_matrix, vx, sizeof(Mat3));
			} else {
				float mul = 1.f / (1.f + dot3((Vec3){0.f, 1.f, 0.f}, normal));
				Mat3 vx = {
					{ 1.f - sqr(normal[X]) * mul, normal[X], -normal[X] * normal[Z] * mul, },
					{ -normal[X], 1.f - (sqr(normal[X]) + sqr(normal[Z])) * mul, -normal[Z], },
					{ -normal[X] * normal[Z] * mul, normal[Z], 1.f - sqr(normal[Z]) * mul, },
				};
				memcpy(rotation_matrix, vx, sizeof(Mat3));
			}
			Vec3 delta = {1.f, 1.f, 1.f};
			size_t num_samples;
			/* branch only on the primary bounce; deeper bounces take 1 sample */
			if (remaining_bounces == context->max_bounces) {
				num_samples = context->samples_per_pixel;
				mul3s(delta, 1.f / (float)num_samples, delta);
			} else {
				num_samples = 1;
			}
			Vec3 light_mul;
			for (i = 0; i < num_samples; i++) {
				float inclination = acosf(rand_flt() * 2.f - 1.f);
				float azimuth = rand_flt() * PI;
				mulm3(rotation_matrix, (Vec3)SPHERICAL_TO_CARTESIAN(1, inclination, azimuth), outgoing_ray.vector);
				mul3s(delta, dot3(normal, outgoing_ray.vector), light_mul);
				/* recurse with 0 bounces: gather one-bounce indirect light */
				cast_ray(context, &outgoing_ray, light_mul, obj_color, 0, NULL);
			}
		}
		break;
	}
	mul3v(obj_color, kr, obj_color);
	/* attenuate by the distance this ray travelled */
	switch (context->light_attenuation) {
	case LIGHT_ATTENUATION_NONE:
		break;
	case LIGHT_ATTENUATION_LINEAR:
		mul3s(obj_color, 1.f / (context->light_attenuation_offset + min_distance), obj_color);
		break;
	case LIGHT_ATTENUATION_SQUARE:
		mul3s(obj_color, 1.f / sqr(context->light_attenuation_offset + min_distance), obj_color);
		break;
	}
	add3v(color, obj_color, color);
	if (!remaining_bounces)
		return;
	//reflection
	if (inside_object != object && material->reflective) {
		Vec3 reflected_kr;
		mul3v(kr, material->kr, reflected_kr);
		/* skip rays whose remaining weight cannot matter */
		if (context->minimum_light_intensity_sqr < magsqr3(reflected_kr)) {
			mul3s(normal, 2 * b, outgoing_ray.vector);
			sub3v(ray->vector, outgoing_ray.vector, outgoing_ray.vector);
			cast_ray(context, &outgoing_ray, reflected_kr, color, remaining_bounces - 1, NULL);
		}
	}
	//transparency
	if (material->transparent) {
		Vec3 refracted_kt;
		mul3v(kr, material->kt, refracted_kt);
		if (context->minimum_light_intensity_sqr < magsqr3(refracted_kt)) {
			/* NOTE(review): fabs() here is the double version; fabsf()
			 * would avoid a float->double round trip — confirm intent. */
			float incident_angle = acosf(fabs(b));
			float refractive_multiplier = is_outside ?
1.f / material->refractive_index : material->refractive_index; float refracted_angle = asinf(sinf(incident_angle) * refractive_multiplier); float delta_angle = refracted_angle - incident_angle; Vec3 c, f, g, h; cross(ray->vector, normal, c); norm3(c); if (!is_outside) mul3s(c, -1.f, c); cross(c, ray->vector, f); mul3s(ray->vector, cosf(delta_angle), g); mul3s(f, sinf(delta_angle), h); add3v(g, h, outgoing_ray.vector); norm3(outgoing_ray.vector); cast_ray(context, &outgoing_ray, refracted_kt, color, remaining_bounces - 1, object); } } } void create_image(const Context *context) { Vec3 kr = {1.f, 1.f, 1.f}; Camera *camera = context->camera; Vec3 *raw_pixels = calloc(context->resolution[X] * context->resolution[Y], sizeof(Vec3)); #ifdef MULTITHREADING #pragma omp parallel for #endif for (uint32_t row = 0; row < camera->image.resolution[Y]; row++) { Vec3 pixel_position; mul3s(camera->image.vectors[Y], row, pixel_position); add3v(pixel_position, camera->image.corner, pixel_position); Line ray; memcpy(ray.position, camera->position, sizeof(Vec3)); uint32_t pixel_index = camera->image.resolution[X] * row; uint32_t col; for (col = 0; col < camera->image.resolution[X]; col++) { add3v(pixel_position, camera->image.vectors[X], pixel_position); sub3v(pixel_position, camera->position, ray.vector); norm3(ray.vector); cast_ray(context, &ray, kr, raw_pixels[pixel_index], context->max_bounces, NULL); pixel_index++; } } size_t image_size = camera->image.resolution[Y] * camera->image.resolution[X]; size_t i; float slope = context->brightness; if (context->normalize_colors) { float min = FLT_MAX, max = FLT_MIN; for (i = 0; i < image_size; i++) { float *raw_pixel = raw_pixels[i]; float pixel_min = min3(raw_pixel), pixel_max = max3(raw_pixel); if (pixel_min < min) min = pixel_min; if (pixel_max > max) max = pixel_max; } slope /= (max - min); for (i = 0; i < image_size; i++) { sub3s(raw_pixels[i], min, raw_pixels[i]); } } for (i = 0; i < image_size; i++) { mul3s(raw_pixels[i], slope, 
raw_pixels[i]); uint8_t *pixel = camera->image.pixels[i]; pixel[0] = (uint8_t)fmaxf(fminf(raw_pixels[i][0] * 255.f, 255.f), 0.f); pixel[1] = (uint8_t)fmaxf(fminf(raw_pixels[i][1] * 255.f, 255.f), 0.f); pixel[2] = (uint8_t)fmaxf(fminf(raw_pixels[i][2] * 255.f, 255.f), 0.f); } } void process_arguments(int argc, char *argv[], Context *context) { if (argc < 5) { if (argc == 2) { if (! strcmp("--help", argv[1]) || ! strcmp("-h", argv[1])) { printf("%s", HELPTEXT); exit(0); } } err(ERR_ARGC); } context->scene_file = fopen(argv[1], "rb"); err_assert(context->scene_file, ERR_ARGV_IO_OPEN_SCENE); err_assert(strstr(argv[2], ".ppm"), ERR_ARGV_FILE_EXT); context->output_file = fopen(argv[2], "wb"); err_assert(context->output_file, ERR_ARGV_IO_OPEN_OUTPUT); context->resolution[X] = abs(atoi(argv[3])); context->resolution[Y] = abs(atoi(argv[4])); int32_t i; for (i = 5; i < argc; i += 2) { switch (djb_hash(argv[i])) { case 5859045://-m #ifdef MULTITHREADING if (djb_hash(argv[i + 1]) == 193414065) {//max NUM_THREADS = omp_get_max_threads(); } else { NUM_THREADS = atoi(argv[i + 1]); err_assert(NUM_THREADS <= omp_get_max_threads(), ERR_ARGV_NUM_THREADS); } #else err(ERR_ARGV_MULTITHREADING); #endif /* MULTITHREADING */ break; case 5859050://-b context->max_bounces = abs(atoi(argv[i + 1])); break; case 5859049://-a context->minimum_light_intensity_sqr = sqr(atof(argv[i + 1])); break; case 5859067://-s switch (djb_hash(argv[i + 1])) { case 187940251://phong context->reflection_model = REFLECTION_PHONG; break; case 175795714://blinn context->reflection_model = REFLECTION_BLINN; break; default: err(ERR_ARGV_REFLECTION); } break; case 5859055://-g switch (djb_hash(argv[i + 1])) { case 354625309://ambient context->global_illumination_model = GLOBAL_ILLUMINATION_AMBIENT; break; case 2088095368://path context->global_illumination_model = GLOBAL_ILLUMINATION_PATH_TRACING; break; default: err(ERR_ARGV_GLOBAL_ILLUMINATION); } break; case 5859046://-n context->samples_per_pixel = 
abs(atoi(argv[i + 1])); break; case 5859051://-c context->normalize_colors = true; i--; break; case 5859044://-l switch (djb_hash(argv[i + 1])) { case 2087865487://none context->light_attenuation = LIGHT_ATTENUATION_NONE; break; case 193412846://lin context->light_attenuation = LIGHT_ATTENUATION_LINEAR; break; case 193433013://sqr context->light_attenuation = LIGHT_ATTENUATION_SQUARE; break; default: err(ERR_ARGV_LIGHT_ATTENUATION); } break; case 5859047://-o context->light_attenuation_offset = atof(argv[i + 1]); break; case 5859064://-p switch (djb_hash(argv[i + 1])) { case 2087865487://none context->log_option = LOG_NONE; break; case 2088303039://real context->log_option = LOG_REALTIME; break; case 193416643://cpu context->log_option = LOG_CPUTIME; break; } break; case 5859041://-i context->brightness = atof(argv[i + 1]); break; default: err(ERR_ARGV_UNRECOGNIZED); } } } void log_msg(const Context *context, const char *msg) { double t; switch (context->log_option) { case LOG_NONE: return; case LOG_REALTIME: { struct timespec cur_t; timespec_get(&cur_t, TIME_UTC); t = (cur_t.tv_sec - context->start_timespec.tv_sec) + (cur_t.tv_nsec - context->start_timespec.tv_nsec) * 1e-9; } break; case LOG_CPUTIME: { clock_t cur_t = clock(); t = (double)(cur_t - context->start_clock) / CLOCKS_PER_SEC; } break; } printf("[%07.3f] %s\n", t, msg); } int main(int argc, char *argv[]) { Context *context = context_new(); srand((unsigned) context->start_timespec.tv_sec); process_arguments(argc, argv, context); #ifdef MULTITHREADING omp_set_num_threads(NUM_THREADS); #endif log_msg(context, "INITIALIZING SCENE."); scene_load(context); fclose(context->scene_file); context->scene_file = NULL; normalize_scene(context); bvh_generate(context); log_msg(context, "INITIALIZED SCENE. BEGAN RENDERING."); create_image(context); log_msg(context, "FINISHED RENDERING. 
SAVING IMAGE."); save_image(context->output_file, &context->camera->image); fclose(context->output_file); log_msg(context, "SAVED IMAGE."); context_delete(context); }
layout.h
// // layout.h // forcelayout.cc // // Created by Andrei Kashcha on 5/30/16. // Copyright (c) 2015 Andrei Kashcha. All rights reserved. // #ifndef __layout__ #define __layout__ #include <vector> #include <unordered_map> #include <algorithm> #include <numeric> #include "nangraph.cc/graph.h" #include "random.cc/random.h" #include "quadtree.cc/quadtree.h" class BodyNotFoundException: public exception {}; struct LayoutSettings { double stableThreshold; double gravity; double theta; double dragCoeff; double springCoeff; double springLength; double timeStep; LayoutSettings() { stableThreshold = 0.009; gravity = -1.2; theta = 0.8; dragCoeff = 0.02; springCoeff = 0.0008; springLength = 30; timeStep = 20; } LayoutSettings& operator =(const LayoutSettings &other) { stableThreshold = other.stableThreshold; gravity = other.gravity; theta = other.theta; dragCoeff = other.dragCoeff; springCoeff = other.springCoeff; springLength = other.springLength; timeStep = other.timeStep; return *this; } }; class IForceLayout { public: virtual ~IForceLayout() {}; virtual IQuadTree *getTree() = 0; virtual double step() = 0; virtual IVector *getBodyPosition(const size_t &nodeId) = 0; }; template <size_t N> class ForceLayout : public IForceLayout { Graph &graph; LayoutSettings _settings; Random random; std::vector<Body<N>*> _bodies; // this maps nodeId to Body address. TODO: Find a way to not use this. 
std::unordered_map<std::size_t, Body<N>*> _nodeIdToBody; QuadTree<N> tree; void accumulate() { tree.insertBodies(_bodies); auto bodiesCount = _bodies.size(); #pragma omp parallel for for (std::size_t i = 0; i < bodiesCount; ++i) { auto body = _bodies[i]; body->force.reset(); tree.updateBodyForce(body); updateDragForce(body); } #pragma omp parallel for for (std::size_t i = 0; i < bodiesCount; ++i) { auto body = _bodies[i]; updateSpringForce(body); } } double integrate() { double timeStep = _settings.timeStep; double totalV = 0; auto bodiesCount = _bodies.size(); #pragma omp parallel for for (std::size_t i = 0; i < bodiesCount; ++i) { auto body = _bodies[i]; double coeff = timeStep / body->mass; body->velocity.addScaledVector(body->force, coeff); double v = body->velocity.length(); totalV += v; if (v > 1) body->velocity.normalize(); body->pos.addScaledVector(body->velocity, timeStep); } return totalV/_bodies.size(); } void updateDragForce(Body<N> *body) { body->force.addScaledVector(body->velocity, -_settings.dragCoeff); } void updateSpringForce(Body<N> *source) { Body<N> *body1 = source; for (auto body2 : source->springs) { Vector3<N> dist = body2->pos - body1->pos; double r = dist.length(); if (r == 0) { for (size_t i = 0; i < N; ++i) { dist.coord[i] = (random.nextDouble() - 0.5) / 50; } r = dist.length(); } double coeff = _settings.springCoeff * (r - _settings.springLength) / r; body1->force.addScaledVector(dist, coeff); body2->force.addScaledVector(dist, -coeff); } } void initBodies() { _bodies.reserve(graph.getNodesCount()); NodeCallback initBody = [&](const std::size_t& nodeId) -> bool { auto body = new Body<N>(); auto node = graph.getNode(nodeId); auto degree = node->degree(); body->mass = 1 + degree/3.0; _bodies.push_back(body); _nodeIdToBody[nodeId] = body; // TODO: Find a way to remove this and save ram. 
return false; }; graph.forEachNode(initBody); // Now that we have bodies, let's add links: LinkCallback initSpring = [&](const std::size_t& fromId, const std::size_t& toId, const std::size_t& linkId) -> bool { // TODO: Add verification _nodeIdToBody[fromId]->springs.push_back(_nodeIdToBody[toId]); return false; }; graph.forEachLink(initSpring); setInitialPositions(); } void setInitialPositions() { // TODO: I think this can be done better. For example, combine // CW algorithm with initial placement? auto nodesCount = _bodies.size(); std::vector<int> degree(nodesCount); std::iota(degree.begin(), degree.end(), 0); std::sort(degree.begin(), degree.end(), [&](const int &fromBody, const int &toBody) -> bool { return _bodies[fromBody]->springs.size() > _bodies[toBody]->springs.size(); }); for (auto body : _bodies){ Vector3<N> base; for (auto other: body->springs) { base.add(other->pos); } int neighboursSize = body->springs.size(); if (neighboursSize > 0) { base.multiplyScalar(1./neighboursSize); } int springLength = _settings.springLength; Vector3<N> offset; for (size_t i = 0; i < N; ++i) { offset.coord[i] = springLength * (random.nextDouble() - 0.5); } body->pos.set(base)->add(offset); } } static LayoutSettings makeDefaultSettings() { LayoutSettings settings; return settings; } public: ForceLayout(Graph &g, LayoutSettings settings) : graph(g), _settings(settings), random(42) { initBodies(); } ForceLayout(Graph &g) : ForceLayout(g, ForceLayout::makeDefaultSettings()) {} ~ForceLayout() { for (auto &i: _bodies) { delete i; } } /** * Sets a position for body. If no such body exist, throws BodyNotFoundException. */ void setPosition(const std::size_t &bodyId, const Vector3<N> &position) { auto body = getBody(bodyId); if (!body) { throw BodyNotFoundException(); } body->pos.set(position); } /** * Performs one iteration of force layout. Returns total movement performed * during that step. 
*/ virtual double step() { accumulate(); double totalMovement = integrate(); return totalMovement; } /** * Given a node id from a graph, returns a pointer to its body object or * null if no such node is found. */ Body<N> *getBody(const std::size_t &nodeId) { auto search = _nodeIdToBody.find(nodeId); if (search == _nodeIdToBody.end()) { return nullptr; } return search->second; } virtual IVector *getBodyPosition(const std::size_t &nodeId) { auto body = getBody(nodeId); if (!body) return nullptr; return &(body->pos); } virtual IQuadTree *getTree() { return &tree; } }; #endif
task_nested_omp.c
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
 * See LICENSE.txt in top-level directory.
 */

#include <assert.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define NUM_TASKS 50000
#define NUM_REPS 1

/* Global counter bumped once per "na" task; checked (printed) at exit.
 * NOTE(review): incremented from concurrent tasks without atomics, so the
 * final value may legitimately fall short of ntasks. */
int o = 0;

/* Scales *a by value in place. */
void sscal(float value, float *a) {
    *a = *a * value;
}

/* Near-empty task body: only bumps the global counter o. */
void na(float value) {
    o++;
}

/* Spawns two nested tasks per element: one scaling *a, one bumping o.
 * Used to benchmark nested task creation overhead. */
void presscal(float value, float *a) {
#pragma omp task
    {
        sscal(value, a);
    }
#pragma omp task
    {
        na(value);
    }
}

/* Benchmark driver: argv[1] = number of tasks (default NUM_TASKS),
 * argv[2] = repetitions (default NUM_REPS).  Times task creation and total
 * execution, then verifies each element was scaled exactly once. */
int main(int argc, char *argv[])
{
    int i, r, nthreads;
    double *time, avg_time = 0.0;
    char *str, *endptr;
    float *a;
    double time2 = 0.0;

    /* Discover the default parallel-region thread count. */
#pragma omp parallel
    {
#pragma omp master
        {
            nthreads = omp_get_num_threads();
        }
    }

    if (argc > 1) {
        str = argv[1];
    }
    int ntasks = argc > 1 ? strtoll(str, &endptr, 10) : NUM_TASKS;
    if (ntasks < nthreads)
        ntasks = nthreads;
    int rep = (argc > 2) ? atoi(argv[2]) : NUM_REPS;
    /* Fix: rep <= 0 previously caused malloc(0) and a division by zero. */
    if (rep < 1)
        rep = 1;

    time = malloc(sizeof(double) * rep);
    a = malloc(sizeof(float) * ntasks);
    /* Fix: the original never checked either allocation before use. */
    if (time == NULL || a == NULL) {
        fprintf(stderr, "allocation failed\n");
        free(time);
        free(a);
        return EXIT_FAILURE;
    }

    for (i = 0; i < ntasks; i++) {
        a[i] = i + 100.0f;
    }

    for (r = 0; r < rep; r++) {
        time[r] = omp_get_wtime();
#pragma omp parallel
        {
#pragma omp single
            {
                /* time2 measures task *creation* only (single thread). */
                time2 = omp_get_wtime();
                for (i = 0; i < ntasks; i++) {
#pragma omp task firstprivate(i)
                    {
                        presscal(0.9f, &a[i]);
                    }
                }
                time2 = omp_get_wtime() - time2;
            }
        }
        time[r] = omp_get_wtime() - time[r];
        avg_time += time[r];
    }

    // TODO: Just works with one repetition
    for (i = 0; i < ntasks; i++) {
        if (a[i] != (i + 100.0f) * 0.9f) {
            printf("error: a[%d]=%2.f expected %2.f\n", i, a[i],
                   (i + 100.0f) * 0.9f);
        }
    }

    avg_time /= rep;
    printf("nthreads: %d\nntasks: %d\nTime(s):%f\nCreation Time: %f\n",
           nthreads, ntasks, avg_time, time2);
    printf("o=%d deberia valer %d\n", o, ntasks);

    /* Fix: the original leaked both heap buffers. */
    free(time);
    free(a);
    return EXIT_SUCCESS;
}
bfs_replicated_csc.c
/* Copyright (C) 2010 The Trustees of Indiana University.                  */
/*                                                                         */
/* Use, modification and distribution is subject to the Boost Software     */
/* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */
/* http://www.boost.org/LICENSE_1_0.txt)                                   */
/*                                                                         */
/*  Authors: Jeremiah Willcock                                             */
/*           Andrew Lumsdaine                                              */

#define _GNU_SOURCE
#include "common.h"
#include "oned_csc.h"
#include "onesided.h"
#include <mpi.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <limits.h>
#include <assert.h>

/* The 1-D column-compressed graph plus the bitmap queues reused across BFS
 * runs.  "summary" bitmaps hold one bit per word of the main bitmap so empty
 * regions can be skipped in bulk. */
static oned_csc_graph g;
static unsigned long* g_in_queue;
static unsigned long* g_in_queue_summary;
static unsigned long* g_out_queue;
static unsigned long* g_out_queue_summary;
static unsigned long* g_visited;

/* Allocates all queue/visited bitmaps, rounding sizes up so the summary
 * bitmaps divide evenly (ULONG_BITS^2 vertices per summary word). */
static void allocate_memory(void) {
  int64_t maxlocalverts = g.max_nlocalverts;
  int64_t local_queue_summary_size = (maxlocalverts + ULONG_BITS * ULONG_BITS - 1) / ULONG_BITS / ULONG_BITS;
  int64_t local_queue_size = local_queue_summary_size * ULONG_BITS;
  int64_t global_queue_summary_size = MUL_SIZE(local_queue_summary_size);
  int64_t global_queue_size = MUL_SIZE(local_queue_size);
  g_in_queue = (unsigned long*)xmalloc(global_queue_size * sizeof(unsigned long));
  g_in_queue_summary = (unsigned long*)xmalloc(global_queue_summary_size * sizeof(unsigned long));
  g_out_queue = (unsigned long*)xmalloc(local_queue_size * sizeof(unsigned long));
  g_out_queue_summary = (unsigned long*)xmalloc(local_queue_summary_size * sizeof(unsigned long));
  g_visited = (unsigned long*)xmalloc(local_queue_size * sizeof(unsigned long));
}

/* Frees everything allocate_memory() created and nulls the pointers. */
static void deallocate_memory(void) {
  free(g_in_queue); g_in_queue = NULL;
  free(g_in_queue_summary); g_in_queue_summary = NULL;
  free(g_out_queue); g_out_queue = NULL;
  free(g_out_queue_summary); g_out_queue_summary = NULL;
  free(g_visited); g_visited = NULL;
}

/* Converts the edge tuples to CSC form; the allocate/deallocate round-trip
 * only probes that enough memory will be available for run_bfs(). */
void make_graph_data_structure(const tuple_graph* const tg) {
  convert_graph_to_oned_csc(tg, &g);
  allocate_memory(); /* Make sure all of the space is available */
  deallocate_memory();
}

void free_graph_data_structure(void) {
  free_oned_csc_graph(&g);
  /* deallocate_memory(); */
}

int bfs_writes_depth_map(void) {
  return 0;
}

/* This version is the traditional level-synchronized BFS using two queues.  A
 * bitmap is used to indicate which vertices have been visited.  Messages are
 * sent and processed asynchronously throughout the code to hopefully overlap
 * communication with computation. */
void run_bfs(int64_t root, int64_t* pred) {
  allocate_memory();
  const ptrdiff_t nlocalverts = g.nlocalverts;
  /* const int64_t nglobalverts = g.nglobalverts; */
  const size_t* const restrict rowstarts = g.rowstarts;
  const int64_t* const restrict column = g.column;

  /* Set up the visited bitmap. */
  int lg_local_queue_size = g.lg_local_queue_size;
  int64_t local_queue_size = INT64_C(1) << lg_local_queue_size;
  int64_t local_queue_summary_size = local_queue_size / ULONG_BITS;
  int64_t global_queue_summary_size = MUL_SIZE(local_queue_summary_size);
  int64_t global_queue_size = MUL_SIZE(local_queue_size);

#if 0
  int64_t* restrict column_swizzled = (int64_t*)xmalloc(nlocaledges * sizeof(int64_t));
  {
    size_t i;
    for (i = 0; i < nlocaledges; ++i) {
      int64_t c = column[i];
      column_swizzled[i] = SWIZZLE_VERTEX(c);
    }
  }
#endif

  unsigned long* restrict in_queue = g_in_queue;
  memset(in_queue, 0, global_queue_size * sizeof(unsigned long));
  unsigned long* restrict in_queue_summary = g_in_queue_summary;
  memset(in_queue_summary, 0, global_queue_summary_size * sizeof(unsigned long));
  unsigned long* restrict out_queue = g_out_queue;
  unsigned long* restrict out_queue_summary = g_out_queue_summary;
  unsigned long* restrict visited = g_visited;
  memset(visited, 0, local_queue_size * sizeof(unsigned long));

/* SET_IN: mark a global vertex in the input queue bitmap and its summary.
 * TEST_IN: check a (swizzled) vertex via summary first, then the bitmap.
 * TAS_VISITED_LOCAL: atomic test-and-set in visited; on first visit it also
 * enqueues the vertex into out_queue and yields 0 (i.e. "was not visited").
 * SET_VISITED_LOCAL: non-atomic variant used only before the parallel loop. */
#define SET_IN(v) do {int64_t vs = SWIZZLE_VERTEX(v); size_t word_idx = vs / ULONG_BITS; int bit_idx = vs % ULONG_BITS; unsigned long mask = (1UL << bit_idx); in_queue_summary[word_idx / ULONG_BITS] |= (1UL << (word_idx % ULONG_BITS)); in_queue[word_idx] |= mask;} while (0)
#define TEST_IN(vs) (((in_queue_summary[vs / ULONG_BITS / ULONG_BITS] & (1UL << ((vs / ULONG_BITS) % ULONG_BITS))) != 0) && ((in_queue[vs / ULONG_BITS] & (1UL << (vs % ULONG_BITS))) != 0))
#define TEST_VISITED_LOCAL(v) ((visited[(v) / ULONG_BITS] & (1UL << ((v) % ULONG_BITS))) != 0)
#define TAS_VISITED_LOCAL(v) (((__sync_fetch_and_or(&visited[(v) / ULONG_BITS], (1UL << ((v) % ULONG_BITS))) & (1UL << ((v) % ULONG_BITS))) != 0) ? 1 : (__sync_fetch_and_or(&out_queue[(v) / ULONG_BITS], (1UL << ((v) % ULONG_BITS))), 0))
// #define SET_VISITED_LOCAL(v) do {size_t word_idx = (v) / ULONG_BITS; int bit_idx = (v) % ULONG_BITS; unsigned long mask = (1UL << bit_idx); __sync_fetch_and_or(&visited[word_idx], mask); __sync_fetch_and_or(&out_queue[word_idx], mask);} while (0)
#define SET_VISITED_LOCAL(v) do {size_t word_idx = (v) / ULONG_BITS; int bit_idx = (v) % ULONG_BITS; unsigned long mask = (1UL << bit_idx); visited[word_idx] |= mask; out_queue[word_idx] |= mask;} while (0)

  SET_IN(root);
  /* Initialize all predecessors to "unvisited". */
  {ptrdiff_t i; _Pragma("omp parallel for schedule(static)") for (i = 0; i < nlocalverts; ++i) pred[i] = -1;}
  if (VERTEX_OWNER(root) == rank) {
    pred[VERTEX_LOCAL(root)] = root;
    SET_VISITED_LOCAL(VERTEX_LOCAL(root));
  }
  uint16_t cur_level = 0;
  while (1) {
    ++cur_level;
#if 0
    if (rank == 0) fprintf(stderr, "BFS level %" PRIu16 "\n", cur_level);
#endif
    memset(out_queue, 0, local_queue_size * sizeof(unsigned long));
    // memset(out_queue_summary, 0, local_queue_summary_size * sizeof(unsigned long));
    ptrdiff_t i, ii_summary;
#if 0
#pragma omp parallel for schedule(static)
    for (i = 0; i < global_queue_summary_size; ++i) {
      unsigned long val = 0UL;
      int j;
      unsigned long mask = 1UL;
      for (j = 0; j < ULONG_BITS; ++j, mask <<= 1) {
        if (in_queue[i * ULONG_BITS + j]) val |= mask;
      }
      in_queue_summary[i] = val;
    }
#endif
    unsigned long not_done = 0;

    /* Sweep the global input queue; summary words let whole empty ranges be
     * skipped.  For each set bit (a frontier vertex) scan its CSC column and
     * claim unvisited local targets via the atomic TAS_VISITED_LOCAL. */
#pragma omp parallel for schedule(static) reduction(|:not_done)
    for (ii_summary = 0; ii_summary < global_queue_summary_size; ++ii_summary) {
      uint64_t val_summary = in_queue_summary[ii_summary];
      if (val_summary == 0) continue;
      int ii_offset;
      ptrdiff_t ii;
      for (ii_offset = 0; ii_offset < ULONG_BITS; ++ii_offset) {
        if ((val_summary & (UINT64_C(1) << ii_offset)) == 0) continue;
        ii = ii_summary * ULONG_BITS + ii_offset;
        uint64_t val = in_queue[ii];
        if (val == 0) continue;
        size_t i, i_end = rowstarts[ii + 1];
        for (i = rowstarts[ii]; i < i_end; ++i) {
          int64_t c = column[i];
          int64_t v0_local = c / ULONG_BITS;
          if ((val & (UINT64_C(1) << (c % ULONG_BITS))) != 0 /* TEST_IN(v1_swizzled) */ && !TAS_VISITED_LOCAL(v0_local)) {
            assert (pred[v0_local] == -1);
            int64_t v1_swizzled = (int64_t)ii * ULONG_BITS + c % ULONG_BITS;
            pred[v0_local] = UNSWIZZLE_VERTEX(v1_swizzled);
            not_done |= 1;
          }
        }
      }
    }
#if 1
    /* Rebuild out_queue_summary from out_queue and fold the new frontier
     * into the visited bitmap. */
#pragma omp parallel for schedule(static)
    for (i = 0; i < local_queue_summary_size; ++i) {
      unsigned long val = 0UL;
      int j;
      unsigned long mask = 1UL;
      for (j = 0; j < ULONG_BITS; ++j, mask <<= 1) {
        unsigned long full_val = out_queue[i * ULONG_BITS + j];
        visited[i * ULONG_BITS + j] |= full_val;
        if (full_val) val |= mask;
      }
      out_queue_summary[i] = val;
      // not_done |= val;
    }
#endif
    /* Level-synchronous barrier: stop when no rank discovered a vertex,
     * otherwise exchange the new frontier with everyone. */
    MPI_Allreduce(MPI_IN_PLACE, &not_done, 1, MPI_UNSIGNED_LONG, MPI_BOR, MPI_COMM_WORLD);
    if (not_done == 0) break;
    MPI_Allgather(out_queue, local_queue_size, MPI_UNSIGNED_LONG, in_queue, local_queue_size, MPI_UNSIGNED_LONG, MPI_COMM_WORLD);
    MPI_Allgather(out_queue_summary, local_queue_summary_size, MPI_UNSIGNED_LONG, in_queue_summary, local_queue_summary_size, MPI_UNSIGNED_LONG, MPI_COMM_WORLD);
  }
  deallocate_memory();
}

/* Maps each global vertex id to its owning rank and local index. */
void get_vertex_distribution_for_pred(size_t count, const int64_t* vertex_p, int* owner_p, size_t* local_p) {
  const int64_t* restrict vertex = vertex_p;
  int* restrict owner = owner_p;
  size_t* restrict local = local_p;
  ptrdiff_t i;
#pragma omp parallel for
  for (i = 0; i < (ptrdiff_t)count; ++i) {
    owner[i] = VERTEX_OWNER(vertex[i]);
    local[i] = VERTEX_LOCAL(vertex[i]);
  }
}

/* Inverse of the mapping above. */
int64_t vertex_to_global_for_pred(int v_rank, size_t v_local) {
  return VERTEX_TO_GLOBAL(v_rank, v_local);
}

size_t get_nlocalverts_for_pred(void) {
  return g.nlocalverts;
}
graph_tool.h
#ifndef _GRAPH_TOOL_H
#define _GRAPH_TOOL_H

#include <omp.h>
#include <iostream>
#include <map>
#include <memory>
#include <vector>
#include <fstream>
#include <algorithm>
#include <sstream>
#include <limits>
#include <cstdint>

namespace apsara {
namespace odps {
namespace graph {
namespace query {

// Converts edge-list text files into binary CSR or adjacency-list form and
// provides checksum routines to cross-validate the three representations.
// NOTE(review): throughout this class, shared_ptr<VType> wraps arrays created
// with new[] (or new char[] cast to VType*); the default deleter calls
// scalar delete on a VType* — undefined behavior.  Consider
// std::shared_ptr<VType[]> or a custom array deleter.
template <typename VType>
class GraphTool {
  public:
    GraphTool() {}
    virtual ~GraphTool() {}
    // Reads every edge file listed in fileList and writes forward+reverse CSR.
    void PrepareCSR(const std::string &fileList, const std::string &csrFolder);
    // Same input, but writes adjacency-list binary files instead.
    void PrepareAdj(const std::string &fileList, const std::string &adjFolder);
    // Checksums over the three representations; printed values should match.
    void CheckSumOrig(const std::string &fileList);
    void CheckSumCSR(const std::string &folder);
    void CheckSumAdj(const std::string &folder);
    // Fills vIdx (per-vertex running offsets) and eIdx (neighbor ids) from an
    // adjacency map.
    void FillCSR(const std::map<VType, std::vector<VType>> &graph, const size_t &numV,
                 std::shared_ptr<VType> &vIdx, std::shared_ptr<VType> &eIdx);
    // Loads one binary file; returns the element count.
    static size_t ReadOneFile(const std::string &file, std::shared_ptr<VType> &data);
    // Loads an adjacency file plus its record-length side file into a map.
    static void ReadOneFile(const std::string &file, const std::string &fileLen,
                            std::map<VType, std::shared_ptr<VType>> &graph);
  private:
    void WriteOneFile(const std::string &file, std::shared_ptr<VType> &data, const size_t &len);
    void WriteOneFile(const std::string &file, const std::string &fileLen,
                      std::map<VType, std::vector<VType>> &graph);
};

template<typename VType>
void GraphTool<VType>::PrepareCSR(const std::string &fileList, const std::string &csrFolder) {
    VType maxV = 0;
    VType minV = std::numeric_limits<VType>::max();
    std::shared_ptr<VType> vIdx;
    std::shared_ptr<VType> eIdx;
    std::shared_ptr<VType> revVIdx;
    std::shared_ptr<VType> revEIdx;
    std::map<VType, std::vector<VType>> graph;
    std::map<VType, std::vector<VType>> revGraph;
    std::ifstream ifs(fileList);
    std::string file;
    size_t numV=0;
    size_t numE=0;
    // Each line of fileList names an edge file; each of its lines is
    // "<from> <to>".  Build forward and reverse adjacency simultaneously.
    while(getline(ifs, file)) {
        std::cout<<"Process: "<<file<<std::endl;
        std::ifstream ifs1(file);
        std::string line;
        while(getline(ifs1, line)) {
            std::stringstream ss(line);
            VType from, to;
            ss>>from;
            ss>>to;
            maxV = std::max((VType)std::max(from,to), maxV);
            minV = std::min((VType)std::min(from,to), minV);
            graph[from].push_back(to);
            revGraph[to].push_back(from);
            numE++;
        }
    }
    // Sort each neighbor list so the CSR output is ordered.
    for(auto iter = graph.begin(); iter != graph.end(); ++iter)
        sort(iter->second.begin(), iter->second.end());
    for(auto iter = revGraph.begin(); iter != revGraph.end(); ++iter)
        sort(iter->second.begin(), iter->second.end());
    // NOTE(review): numV assumes a dense id range [minV, maxV], yet FillCSR
    // below indexes vertices from 0 — confirm ids are 0-based in practice.
    numV = (maxV-minV) + 1;
    std::cout<<"numV: "<<numV<<" numE: "<<numE<<std::endl;
    vIdx = std::shared_ptr<VType>(new VType[numV]);
    eIdx = std::shared_ptr<VType>(new VType[numE+1]);
    revVIdx = std::shared_ptr<VType>(new VType[numV]);
    revEIdx = std::shared_ptr<VType>(new VType[numE+1]);
    FillCSR(graph, numV, vIdx, eIdx);
    FillCSR(revGraph, numV, revVIdx, revEIdx);
    WriteOneFile(csrFolder+"/vIdx.bin", vIdx, (size_t)numV*sizeof(VType));
    WriteOneFile(csrFolder+"/eIdx.bin", eIdx, (size_t)numE*sizeof(VType));
    WriteOneFile(csrFolder+"/revVIdx.bin", revVIdx, (size_t)numV*sizeof(VType));
    WriteOneFile(csrFolder+"/revEIdx.bin", revEIdx, (size_t)numE*sizeof(VType));
}

template<typename VType>
void GraphTool<VType>::FillCSR(const std::map<VType, std::vector<VType>> &graph, const size_t &numV,
                               std::shared_ptr<VType> &vIdx, std::shared_ptr<VType> &eIdx) {
    VType sum=0;
    // vIdx[i] is the exclusive end offset of vertex i's edges in eIdx;
    // vertices absent from the map repeat the previous offset.
    for(VType i=0; i<numV; i++) {
        VType from = sum;
        auto iter = graph.find(i);
        if(iter == graph.end()) {
            vIdx.get()[i] = sum;
            continue;
        }
        VType to = sum+iter->second.size();
        sum += iter->second.size();
        vIdx.get()[i] = sum;
        for(VType j=from, k=0; j<to; j++, k++) {
            iter->second[k]; // NOTE(review): no-op statement, safe to delete
            eIdx.get()[j] = iter->second[k];
        }
    }
}

// Writes len raw bytes from data to file.
template<typename VType>
void GraphTool<VType>::WriteOneFile(const std::string &file, std::shared_ptr<VType> &data, const size_t &len) {
    std::ofstream of(file);
    char *buf = (char*)(data.get());
    of.write(buf, len);
    of.close();
}

template<typename VType>
void GraphTool<VType>::PrepareAdj(const std::string &fileList, const std::string &adjFolder) {
    VType maxV = 0;
    VType minV = std::numeric_limits<VType>::max();
    std::map<VType, std::vector<VType>> graph;
    std::map<VType, std::vector<VType>> revGraph;
    std::ifstream ifs(fileList);
    std::string file;
    size_t numV=0;
    size_t numE=0;
    // Same parsing loop as PrepareCSR, but the result is written as
    // variable-length adjacency records instead of CSR arrays.
    while(getline(ifs, file)) {
        std::cout<<"Process: "<<file<<std::endl;
        std::ifstream ifs1(file);
        std::string line;
        while(getline(ifs1, line)) {
            std::stringstream ss(line);
            VType from, to;
            ss>>from;
            ss>>to;
            maxV = std::max((VType)std::max(from,to), maxV);
            minV = std::min((VType)std::min(from,to), minV);
            graph[from].push_back(to);
            revGraph[to].push_back(from);
            numE++;
        }
    }
    for(auto iter = graph.begin(); iter != graph.end(); ++iter)
        sort(iter->second.begin(), iter->second.end());
    for(auto iter = revGraph.begin(); iter != revGraph.end(); ++iter)
        sort(iter->second.begin(), iter->second.end());
    numV = (maxV-minV)+1;
    std::cout<<"numV: "<<numV<<" numE: "<<numE<<std::endl;
    WriteOneFile(adjFolder+"/graph.bin", adjFolder+"/graphLen.bin", graph);
    WriteOneFile(adjFolder+"/revGraph.bin", adjFolder+"/revGraphLen.bin", revGraph);
}

// Record format: [vertex id, neighbor...]; per-record byte lengths go to
// fileLen so the reader can split the stream.
// NOTE(review): the per-record `data` buffers are new[]'d and never deleted —
// this leaks one allocation per vertex; `dataLen` also leaks.
template<typename VType>
void GraphTool<VType>::WriteOneFile(const std::string &file, const std::string &fileLen,
                                    std::map<VType, std::vector<VType>> &graph) {
    std::ofstream of(file);
    std::ofstream ofLen(fileLen);
    size_t *dataLen = new size_t[graph.size()];
    size_t count = 0;
    for(auto iter = graph.begin(); iter != graph.end(); ++iter) {
        size_t len = iter->second.size()+1;
        VType *data = new VType[len];
        data[0] = iter->first;
        for(size_t i=0; i<iter->second.size(); ++i)
            data[i+1] = iter->second[i];
        of.write((char*)data, len*sizeof(VType));
        dataLen[count++] = len*sizeof(VType);
    }
    ofLen.write((char*)dataLen, graph.size()*sizeof(size_t));
    of.close();
    ofLen.close();
}

// Reference checksum straight from the text edge files.
template<typename VType>
void GraphTool<VType>::CheckSumOrig(const std::string &fileList) {
    size_t sumFrom=0;
    size_t sumTo=0;
    std::ifstream ifs(fileList);
    std::string file;
    while(getline(ifs, file)) {
        std::cout<<"Process: "<<file<<std::endl;
        std::ifstream ifs1(file);
        std::string line;
        while(getline(ifs1, line)) {
            std::stringstream ss(line);
            VType from, to;
            ss>>from;
            ss>>to;
            sumFrom += from;
            sumTo += to;
        }
    }
    std::cout<<"Sum from:"<<sumFrom<<" Sum to:"<<sumTo<<std::endl;
}

template<typename VType>
void GraphTool<VType>::CheckSumCSR(const std::string &folder) {
    size_t sumFrom=0;
    size_t sumTo=0;
    VType numE;
    VType numV;
    std::shared_ptr<VType> vIdx;
    std::shared_ptr<VType> eIdx;
    std::shared_ptr<VType> revVIdx;
    std::shared_ptr<VType> revEIdx;
    // Load the four CSR files concurrently, one per thread.
    // NOTE(review): threads 0/2 both write numV and 1/3 both write numE
    // (presumably equal values, but formally a data race) — confirm.
#pragma omp parallel num_threads(4)
    {
        int tid = omp_get_thread_num();
        if(tid == 0) {
            numV = GraphTool<VType>::ReadOneFile(folder+"/vIdx.bin", vIdx);
        }
        if(tid == 1) {
            numE = GraphTool<VType>::ReadOneFile(folder+"/eIdx.bin", eIdx);
        }
        if(tid == 2) {
            numV = GraphTool<VType>::ReadOneFile(folder+"/revVIdx.bin", revVIdx);
        }
        if(tid == 3) {
            numE = GraphTool<VType>::ReadOneFile(folder+"/revEIdx.bin", revEIdx);
        }
    }
    std::cout<<"numV: "<<numV<<" numE: "<<numE<<std::endl;
    // Reconstruct per-edge (from, to) sums from the offset arrays.
    for(size_t i=0; i<numV; i++) {
        uint64_t from = i==0 ? 0 :vIdx.get()[i-1];
        uint64_t to = vIdx.get()[i];
        sumFrom += (to-from) * i;
        for(size_t j=from; j<to; j++) {
            sumTo += eIdx.get()[j];
        }
    }
    std::cout<<"Sum from:"<<sumFrom<<" Sum to:"<<sumTo<<std::endl;
    sumFrom=0;
    sumTo=0;
    for(size_t i=0; i<numV; i++) {
        uint64_t from = i==0 ? 0 :revVIdx.get()[i-1];
        uint64_t to = revVIdx.get()[i];
        sumFrom += (to-from) * i;
        for(size_t j=from; j<to; j++) {
            sumTo += revEIdx.get()[j];
        }
    }
    std::cout<<"Sum from:"<<sumFrom<<" Sum to:"<<sumTo<<std::endl;
}

// Slurps a whole binary file into data; returns element count.
template<typename VType>
size_t GraphTool<VType>::ReadOneFile(const std::string &file, std::shared_ptr<VType> &data) {
    std::ifstream if1(file);
    if1.seekg (0, if1.end);
    size_t length = if1.tellg();
    if1.seekg (0, if1.beg);
    char *buf;
    {
        // parallel malloc will hurdle performance
        // NOTE(review): this only serializes allocation when called from
        // inside a parallel region (as in CheckSumCSR).
#pragma omp critical
        buf = new char[length];
    }
    if1.read(buf, length);
    data = std::shared_ptr<VType>((VType*)buf);
    return length/sizeof(VType);
}

template<typename VType>
void GraphTool<VType>::CheckSumAdj(const std::string &folder) {
    size_t sumFrom=0;
    size_t sumTo=0;
    std::map<VType, std::shared_ptr<VType>> graph;
    std::map<VType, std::shared_ptr<VType>> revGraph;
    ReadOneFile(folder+"/graph.bin", folder+"/graphLen.bin", graph);
    ReadOneFile(folder+"/revGraph.bin", folder+"/revGraphLen.bin", revGraph);
    // Record layout after loading: [0] = neighbor count, [1..count] = ids.
    for(auto iter = graph.begin(); iter != graph.end(); ++iter) {
        VType num = iter->second.get()[0];
        sumFrom += iter->first * num;
        for(size_t i=1; i<=num; i++) {
            sumTo += iter->second.get()[i];
        }
    }
    std::cout<<"sumFrom: "<<sumFrom<<" sumTo: "<<sumTo<<std::endl;
    sumFrom = 0;
    sumTo = 0;
    for(auto iter = revGraph.begin(); iter != revGraph.end(); ++iter) {
        VType num = iter->second.get()[0];
        sumFrom += iter->first * num;
        for(size_t i=1; i<=num; i++)
            sumTo += iter->second.get()[i];
    }
    std::cout<<"sumFrom: "<<sumFrom<<" sumTo: "<<sumTo<<std::endl;
}

template<typename VType>
void GraphTool<VType>::ReadOneFile(const std::string &file, const std::string &fileLen,
                                   std::map<VType, std::shared_ptr<VType>> &graph) {
    std::ifstream ifs(file);
    std::ifstream ifsLen(fileLen);
    ifsLen.seekg (0, ifsLen.end);
    size_t length = ifsLen.tellg();
    ifsLen.seekg (0, ifsLen.beg);
    char *buf = new char[length];
    ifsLen.read(buf, length);
    ifsLen.close();
    size_t *lens = (size_t*)buf;
    // Re-split the record stream using the side file of record lengths.
    for(size_t i=0; i<length/sizeof(size_t); i++) {
        size_t sz = lens[i];
        char *buf;
        {
#pragma omp critical
            buf = new char[sz];
        }
        ifs.read(buf, sz);
        graph[((VType*)buf)[0]] = std::shared_ptr<VType>((VType*)buf);
        // Overwrite the key slot with the neighbor count (shared with the
        // map value, so the stored record sees this too).
        ((VType*)buf)[0] = sz/sizeof(VType) -1;
    }
    ifs.close();
}

} // namespace query
} // namespace graph
} // namespace odps
} // namespace apsara

#endif
omp_sort.h
#ifndef OMP_SORT_PP_CLASS_H
#define OMP_SORT_PP_CLASS_H

#include "pch.h"
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
#include <omp.h>
#include <math.h>

#define MAX(A, B) (((A) > (B)) ? (A) : (B))
#define MIN(A, B) (((A) > (B)) ? (B) : (A))
#define UP 0
#define DOWN 1
#define VERBOSE 0

/* Collection of OpenMP-parallelized integer sorting routines.
 * NOTE(review): several routines call omp_get_num_threads() OUTSIDE a
 * parallel region, where it returns 1 — flagged inline below. */
int omp_bubble_sort(int *A, int n);
int omp_odd_even_sort(int *A, int n);
int omp_rank_sort(int *A, int n);
int omp_counting_sort(int *x, int n);
int swap(int *a, int *b);
int bitonic_sort_seq(int start, int length, int *A, int flag);
int bitonic_sort_par(int start, int length, int *A, int flag, int m);
int bitonic(int *A, int n);
static int CmpInt(const void *a, const void *b);
void merge(int A[], int B[], int m, int n);
void arraymerge(int *a, int size, int *index, int N);
int QuickSort(int *a, int size);
int RadixSort(int *input, int n);
void Mergesort(int *x, int a, int b);
void mix_them(int *x, int n, int h, int num_threads);
void mix(int *x, int a1, int b1, int a2, int b2);
int Mergesort_Omp(int *x, int n);

// Bubble sort -------------------------------------------------------------------
/* Interleaved bubble sort: thread j handles indices j, j+n_pros, ...
 * Repeats until a full pass performs no swap (bandera == 0).
 * NOTE(review): omp_get_num_threads() here runs in serial code and returns 1,
 * so n_pros is effectively 1 — presumably omp_get_max_threads() was meant. */
int omp_bubble_sort(int *A, int n) {
	int n_pros = omp_get_num_threads();
	int chunk = n / n_pros, bandera = 1, nr = 0, i, j;
	int aux;
	while (bandera) {
		nr++;
		bandera = 0;
#pragma omp parallel for reduction(+:bandera) private(aux)
		for (j = 0; j < n_pros; j++) {
			for (i = j; i < n - 1; i = i + n_pros) {
				if (A[i] > A[i + 1]) {
					aux = A[i];
					A[i] = A[i + 1];
					A[i + 1] = aux;
					++bandera;
				}
			}
		}
	}
	return 0;
}
// end bubble sort -------------------------------------------------------------------

// odd even transposition sort -------------------------------------------------------------------
/* Classic odd-even transposition: alternate compare-swap passes over even
 * and odd pairs until no pass swaps anything.
 * NOTE(review): every thread executes `sorted = 0;` on the shared variable
 * before the reductions — same value, but formally a race. */
int omp_odd_even_sort(int *A, int n) {
	int sorted, i;
	sorted = 1;
	while (sorted != 0) {
#pragma omp parallel
		{
			sorted = 0;
#pragma omp for reduction(+:sorted)
			for (i = 0; i < n - 1; i += 2) {
				if (A[i] > A[i + 1]) {
					int temp = A[i];
					A[i] = A[i + 1];
					A[i + 1] = temp;
					sorted++;
				}
			}
#pragma omp for reduction(+:sorted)
			for (i = 1; i < n - 1; i += 2) {
				if (A[i] > A[i + 1]) {
					int temp = A[i];
					A[i] = A[i + 1];
					A[i + 1] = temp;
					sorted++;
				}
			}
		}
	}
	return 0;
}
// end odd even transposition sort -------------------------------------------------------------------

// rank sort -------------------------------------------------------------------
/* Each element's final position is the count of smaller elements.
 * NOTE(review): ties and zero-valued inputs break this — y[k]==0 is used to
 * mean "slot unfilled" and is patched with the previous element. */
int omp_rank_sort(int *A, int n) {
	int *y;
	y = (int *)calloc(n, sizeof(int));
#pragma omp parallel
	{
		int threads = omp_get_num_threads();
		int rank, i, j, startval, endval, my_num, my_place;
		rank = omp_get_thread_num();
		startval = n * rank / threads;
		endval = n * (rank + 1) / threads;
		//printf(" %d %d\n",startval, endval);
		for (j = startval; j < endval; j++) {
			my_num = A[j];
			my_place = 0;
			for (i = 0; i < n; i++) {
				if (my_num > A[i]) my_place++;
			}
			y[my_place] = my_num;
		}
	}
	int k;
	for (k = 0; k < n; k++) {
		if (y[k] == 0) A[k] = A[k - 1];
		else A[k] = y[k];
	}
	free(y);
	return 0;
}
//end rank sort -------------------------------------------------------------------

// counting sort -------------------------------------------------------------------
/* Counting sort for integers in [1, m].  Only the initial copy is
 * parallelized. */
int omp_counting_sort(int *A, int n) {
	// Integers in [1,m] and working variables
	int m = 1001, i;
	int *c = (int*)calloc(m, sizeof(int));
	int *b = (int*)calloc(n, sizeof(int));
	//printf("1\n");
	// Copy A into b (only this part is parallelized)
#pragma omp parallel for private(i) shared(A,b)
	for (i = 0; i < n; i++) {
		b[i] = A[i];
	}
	//printf("2\n");
	// Counting phase; NOTE(review): values outside [1, m] index out of bounds.
	for (i = 0; i < n; i++) {
		//printf("%d ", b[i]);
		c[b[i] - 1]++; //TODO index error -1
	}
	//printf("3\n");
	// Prefix sum
	for (i = 1; i < m; i++) c[i] += c[i - 1];
	//printf("4\n");
	// Placement
	for (i = 0; i < n; i++) {
		A[c[b[i] - 1] - 1] = b[i];
		c[b[i] - 1]--;
	}
	//printf("5\n");
	// Free the auxiliary arrays
	free(c);
	free(b);
	return 0;
}
// end counting sort -------------------------------------------------------------------

// bitonic sort -------------------------------------------------------------------
/* Exchanges *a and *b. */
int swap(int *a, int *b) {
	int temp;
	temp = *a;
	*a = *b;
	*b = temp;
	return 0;
}

/* Sequential bitonic sort of A[start .. start+length-1]; flag selects
 * ascending (UP) or descending (DOWN).  length must be a power of 2. */
int bitonic_sort_seq(int start, int length, int *A, int flag) {
	int i;
	int split_length;
	if (length == 1) return 1;
	if (length % 2 != 0) {
		printf("error\n");
		exit(0);
	}
	split_length = length / 2;
	// bitonic split
	for (i = start; i < start + split_length; i++) {
		if (flag == UP) {
			if (A[i] > A[i + split_length])
				swap(&A[i], &A[i + split_length]);
		}
		else {
			if (A[i] < A[i + split_length])
				swap(&A[i], &A[i + split_length]);
		}
	}
	bitonic_sort_seq(start, split_length, A, flag);
	bitonic_sort_seq(start + split_length, split_length, A, flag);
	return 0;
}

/* Parallel bitonic split; recurses while the half is larger than m, then
 * callers finish the sub-blocks sequentially. */
int bitonic_sort_par(int start, int length, int *A, int flag, int m) {
	int i;
	int split_length;
	if (length == 1) {
		return 0;
	}
	// subsequence length must be a power of 2
	if (length % 2 != 0) {
		exit(0);
	}
	split_length = length / 2;
	// bitonic split
#pragma omp parallel for shared(A, flag, start, split_length) private(i)
	for (i = start; i < start + split_length; i++) {
		if (flag == UP) {
			if (A[i] > A[i + split_length])
				swap(&A[i], &A[i + split_length]);
		}
		else {
			if (A[i] < A[i + split_length])
				swap(&A[i], &A[i + split_length]);
		}
	}
	if (split_length > m) {
		bitonic_sort_par(start, split_length, A, flag, m);
		bitonic_sort_par(start + split_length, split_length, A, flag, m);
	}
	return 0;
}

/** \brief Bitonic Sort.  Sorting algorithm for data sets whose length is a
 * power of 2.
 * NOTE(review): num_threads comes from omp_get_num_threads() outside a
 * parallel region (== 1); inner `flag`/`j` sharing in the second phase also
 * looks racy — review before relying on this routine. */
int bitonic(int *A, int n) {
	int m;
	int i, j;
	int flag;
	int num_threads;
	// Number of threads we are working with
	num_threads = omp_get_num_threads();
	// n must be at least twice the number of processors
	if (n < num_threads * 2) {
		return 1;
	}
	// partitioning
	m = n / num_threads;
	// First phase of bitonic: sort each partition
	for (i = 2; i <= m; i = 2 * i) {
#pragma omp parallel for shared(i, A) private(j, flag)
		for (j = 0; j < n; j += i) {
			if ((j / i) % 2 == 0) flag = UP;
			else flag = DOWN;
			bitonic_sort_seq(j, i, A, flag);
		}
	}
	// Second phase of bitonic: merge across partitions
	for (i = 2; i <= num_threads; i = 2 * i) {
		for (j = 0; j < num_threads; j += i) {
			if ((j / i) % 2 == 0) flag = UP;
			else flag = DOWN;
			bitonic_sort_par(j*m, i*m, A, flag, m);
		}
#pragma omp parallel for shared(j)
		for (j = 0; j < num_threads; j++) {
			if (j < i) flag = UP;
			else flag = DOWN;
			bitonic_sort_seq(j*m, m, A, flag);
		}
	}
	return 0;
}
// end bitonic sort -------------------------------------------------------------------

// quick sort -------------------------------------------------------------------
/* qsort comparator for ints (ascending). */
static int CmpInt(const void *a, const void *b) {
	return (*(int*)a - *(int*)b);
}

/* Merge sorted lists A and B into list A.  A must have dim >= m+n */
void merge(int A[], int B[], int m, int n) {
	int i = 0, j = 0, k = 0, p;
	int size = m + n;
	int *C = (int *)malloc(size * sizeof(int));
	while (i < m && j < n) {
		if (A[i] <= B[j]) C[k] = A[i++];
		else C[k] = B[j++];
		k++;
	}
	if (i < m) for (p = i; p < m; p++, k++) C[k] = A[p];
	else for (p = j; p < n; p++, k++) C[k] = B[p];
	for (i = 0; i < size; i++) A[i] = C[i];
	free(C);
}

/* Merges N sorted sub-sections of array a into final, fully sorted array a */
void arraymerge(int *a, int size, int *index, int N) {
	int i, j;
	while (N > 1) {
		for (i = 0; i < N; i++) index[i] = i * size / N;
		index[N] = size;
#pragma omp parallel for private(i)
		for (i = 0; i < N; i += 2) {
			if (VERBOSE)
				fprintf(stderr, "merging %d and %d, index %d and %d (up to %d)\n",
					i, i + 1, index[i], index[i + 1], index[i + 2]);
			merge(a + index[i], a + index[i + 1], index[i + 1] - index[i], index[i + 2] - index[i + 1]);
			if (VERBOSE)
				for (j = 0; j < size; j++)
					fprintf(stderr, "after: %d %d\n", j, a[j]);
		}
		N /= 2;
	}
}

/* Parallel qsort: each thread sorts one slice, then slices are pairwise
 * merged by arraymerge().  NOTE(review): `index` is leaked. */
int QuickSort(int *a, int size) {
	// set up threads
	int i = 0;
	int threads = omp_get_max_threads();
	omp_set_num_threads(threads);
	int *index = (int *)malloc((threads + 1) * sizeof(int));
	for (i = 0; i < threads; i++) index[i] = i * size / threads;
	index[threads] = size;

	/* Main parallel sort loop */
#pragma omp parallel for private(i)
	for (i = 0; i < threads; i++)
		qsort(a + index[i], index[i + 1] - index[i], sizeof(int), CmpInt);

	/* Merge sorted array pieces */
	if (threads > 1)
		arraymerge(a, size, index, threads);
	return 0;
}
// end quick sort -------------------------------------------------------------------

// radix sort
/* LSD radix sort over b-bit digits; the per-digit counting pass is
 * parallelized (prefix sum runs serialized via `ordered`).
 * NOTE(review): temp/input2/output are leaked, and `power` is computed as
 * 2^b - 1 yet compared against d=1001 via `slice <= d` — review the digit
 * bookkeeping before reuse. */
int RadixSort(int *input, int n) {
	int d = 1001; //highest-order digit
	int b = 10, k = d;////
	int *temp = (int*)malloc(k * sizeof(int)); //used in counting sort
	int *input2 = (int*)malloc(sizeof(int)*n);
	int *output = (int*)malloc(sizeof(int)*n);
	int power = ((int)pow(2, b) - 1);
	int l;
	int t = omp_get_num_threads(); //UNUSED
	//omp_set_num_threads(t);
	//main loop
	for (l = 0; l < 32 / b; l++) {
		int slice = l * b;
		int i, j;
		if (slice <= d) {
#pragma omp parallel
			{
#pragma omp for schedule(guided) private(i)
				for (i = 0; i < n; i++)
					input2[i] = (input[i] >> slice) & power;
#pragma omp for schedule(guided) private(i)
				for (i = 0; i < k; i++)
					temp[i] = 0;
#pragma omp for schedule(guided) private(j)
				for (j = 0; j < n; j++)
#pragma omp atomic
					temp[input2[j]]++;
#pragma omp for ordered schedule(guided) private(i)
				for (i = 1; i < k; i++)
#pragma omp ordered
					temp[i] += temp[i - 1];
			}
			for (j = n - 1; j >= 0; j--) {
				output[temp[input2[j]] - 1] = input[j];
				temp[input2[j]]--;
			}
#pragma omp parallel for schedule(guided) private(j)
			for (j = 0; j < n; j++)
				input[j] = output[j];
		}
	}
	return 0;
}
//end radix sort -------------------------------------------------------------------

//merge sort -------------------------------------------------------------------
// Merges two adjacent sorted subranges x[a1..b1] and x[a2..b2] in place.
void mix(int *x, int a1, int b1, int a2, int b2) {
	// Element counts of each subrange
	int n1 = b1 - a1 + 1;
	int n2 = b2 - a2 + 1;
	// Scratch copies of the subranges
	int *x1 = (int*)calloc(n1, sizeof(int));
	int *x2 = (int*)calloc(n2, sizeof(int));
	// Copy the subranges out
	int i, i1 = 0, i2 = 0;
	for (i = 0; i < n1; i++) x1[i] = x[a1 + i];
	for (i = 0; i < n2; i++) x2[i] = x[a2 + i];
	// Reset the output cursor
	i = a1;
	// Standard two-way merge while both runs have elements
	while (i1 < n1 && i2 < n2) {
		// Take the smaller head element and advance its cursor
		if (x1[i1] < x2[i2]) {
			x[i] = x1[i1];
			i1++;
		}
		else {
			x[i] = x2[i2];
			i2++;
		}
		// Advance the output cursor
		i++;
	} // end of merge over both runs
	// Drain run 1 if anything remains
	while (i1 < n1) {
		x[i] = x1[i1];
		i++;
		i1++;
	}
	// Drain run 2 if anything remains
	while (i2 < n2) {
		x[i] = x2[i2];
		i++;
		i2++;
	}
	// Release scratch memory
	free(x1);
	free(x2);
}
//Mergesort de un subvector en los indices [a,b] void Mergesort(int *x, int a, int b) { //Indices de subvectores int a1, a2, b1, b2; //Numero de eltos int n = b - a + 1; //Evitando el caso trivial if (n > 1) { a1 = a; b1 = (int)(a1 + floor(1.0*n / 2) - 1); a2 = b1 + 1; b2 = b; //Ordenando subarreglos recursivamente Mergesort(x, a1, b1); Mergesort(x, a2, b2); //Mezclando subarreglos ordenados mix(x, a1, b1, a2, b2); }//Fin de caso no trivial } //Funcion para mezclar los subvectores usados en Mergesort_Omp //El fin de esta funcion es decidir un orden adecuado para //mezclar los subvectores ordenados por los threads utilizados //en la funcion Mergesort_Omp. //h es el numero de eltos a ordenar en cada thread: //h = n/num_threads void mix_them(int *x, int n, int h, int num_threads) { //variables de limites de subvectores int a1, b1, a2, b2; //Actuando acorde al numero de threads switch (num_threads) { //No mezcla nada case 1: break; case 2: { //calculando limites a1 = 0; b1 = h - 1; a2 = b1 + 1; b2 = n - 1; //mezclando mix(x, a1, b1, a2, b2); //Fin para 2 threads break; } case 3: { //calculando limites de subvectores 1 y 2 a1 = 0; b1 = h - 1; a2 = b1 + 1; b2 = a2 + h - 1; //mezclando subvectores 1 y 2 = subvector @ mix(x, a1, b1, a2, b2); //calculando limites de subvectores @ y 3 b1 = b2; a2 = b1 + 1; b2 = n - 1; //mezclando subvectores @ y 3 mix(x, a1, b1, a2, b2); //Fin para 3 threads break; } case 4: { //calculando limites de subvectores 1 y 2 a1 = 0; b1 = h - 1; a2 = b1 + 1; b2 = a2 + h - 1; //mezclando subvectores 1 y 2 = subvector @ mix(x, a1, b1, a2, b2); //calculando limites de subvectores 3 y 4 a1 = b2 + 1; b1 = a1 + h - 1; a2 = b1 + 1; b2 = n - 1; //mezclando subvectores 3 y 4 = subvector $ mix(x, a1, b1, a2, b2); //calculando limites de subvectores @ y $ b1 = a1 - 1; a1 = 0; a2 = b1 + 1; b2 = n - 1; //mezclando subvectores @ y $ mix(x, a1, b1, a2, b2); //Fin para 4 threads break; } case 5: { //calculando limites de subvectores 1 y 2 a1 = 0; b1 = h - 1; a2 = 
b1 + 1; b2 = a2 + h - 1; //mezclando subvectores 1 y 2 = subvector @ mix(x, a1, b1, a2, b2); //calculando limites de subvectores @ y 3 a1 = 0; b1 = b2; a2 = b1 + 1; b2 = a2 + h - 1; //mezclando subvectores @ y 3 = subvector $ mix(x, a1, b1, a2, b2); //calculando limites de subvectores 4 y 5 a1 = b2 + 1; b1 = a1 + h - 1; a2 = b1 + 1; b2 = n - 1; //mezclando subvectores 4 y 5 = subvector % mix(x, a1, b1, a2, b2); //calculando limites de subvectores $ y % b1 = a1 - 1; a1 = 0; a2 = b1 + 1; b2 = n - 1; //mezclando subvectores $ y % mix(x, a1, b1, a2, b2); //Fin para 5 threads break; } case 6: { //calculando limites de subvectores 1 y 2 a1 = 0; b1 = h - 1; a2 = b1 + 1; b2 = a2 + h - 1; //mezclando subvectores 1 y 2 = subvector @ mix(x, a1, b1, a2, b2); //calculando limites de subvectores 3 y 4 a1 = b2 + 1; b1 = a1 + h - 1; a2 = b1 + 1; b2 = a2 + h - 1; //mezclando subvectores 3 y 4 = subvector $ mix(x, a1, b1, a2, b2); //calculando limites de subvectores 5 y 6 a1 = b2 + 1; b1 = a1 + h - 1; a2 = b1 + 1; b2 = n - 1; //mezclando subvectores 5 y 6 = subvector & mix(x, a1, b1, a2, b2); //calculando limites de subvectores @ y $ a1 = 0; b1 = 2 * h - 1; a2 = b1 + 1; b2 = a2 + 2 * h - 1; //mezclando subvectores @ y $ = subvector % mix(x, a1, b1, a2, b2); //calculando limites de subvectores % y & a1 = 0; b1 = b2; a2 = b1 + 1; b2 = n - 1; //mezclando subvectores % y & mix(x, a1, b1, a2, b2); //Fin para 6 threads break; } case 7: { //calculando limites de subvectores 1 y 2 a1 = 0; b1 = h - 1; a2 = b1 + 1; b2 = a2 + h - 1; //mezclando subvectores 1 y 2 = subvector @ mix(x, a1, b1, a2, b2); //calculando limites de subvectores 3 y 4 a1 = b2 + 1; b1 = a1 + h - 1; a2 = b1 + 1; b2 = a2 + h - 1; //mezclando subvectores 3 y 4 = subvector $ mix(x, a1, b1, a2, b2); //calculando limites de subvectores 5 y 6 a1 = b2 + 1; b1 = a1 + h - 1; a2 = b1 + 1; b2 = a2 + h - 1; //mezclando subvectores 5 y 6 = subvector & mix(x, a1, b1, a2, b2); //calculando limites de subvectores @ y $ a1 = 0; b1 = 2 * h 
- 1; a2 = b1 + 1; b2 = a2 + 2 * h - 1; //mezclando subvectores @ y $ = subvector % mix(x, a1, b1, a2, b2); //calculando limites de subvectores & y 7 a1 = b2 + 1; b1 = a1 + 2 * h - 1; a2 = b1 + 1; b2 = n - 1; //mezclando subvectores @ y $ = subvector # mix(x, a1, b1, a2, b2); //calculando limites de subvectores % y # a1 = 0; b1 = 4 * h - 1; a2 = b1 + 1; b2 = n - 1; //mezclando subvectores % y # mix(x, a1, b1, a2, b2); //Fin para 7 threads break; } case 8: { //calculando limites de subvectores 1 y 2 a1 = 0; b1 = h - 1; a2 = b1 + 1; b2 = a2 + h - 1; //mezclando subvectores 1 y 2 = subvector @ mix(x, a1, b1, a2, b2); //calculando limites de subvectores 3 y 4 a1 = b2 + 1; b1 = a1 + h - 1; a2 = b1 + 1; b2 = a2 + h - 1; //mezclando subvectores 3 y 4 = subvector $ mix(x, a1, b1, a2, b2); //calculando limites de subvectores 5 y 6 a1 = b2 + 1; b1 = a1 + h - 1; a2 = b1 + 1; b2 = a2 + h - 1; //mezclando subvectores 5 y 6 = subvector & mix(x, a1, b1, a2, b2); //calculando limites de subvectores 7 y 8 a1 = b2 + 1; b1 = a1 + h - 1; a2 = b1 + 1; b2 = n - 1; //mezclando subvectores 7 y 8 = subvector # mix(x, a1, b1, a2, b2); //calculando limites de subvectores @ y $ a1 = 0; b1 = 2 * h - 1; a2 = b1 + 1; b2 = a2 + 2 * h - 1; //mezclando subvectores @ y $ = subvector % mix(x, a1, b1, a2, b2); //calculando limites de subvectores & y # a1 = b2 + 1; b1 = a1 + 2 * h - 1; a2 = b1 + 1; b2 = n - 1; //mezclando subvectores & y # = subvector ~ mix(x, a1, b1, a2, b2); //calculando limites de subvectores % y ~ a1 = 0; b1 = 4 * h - 1; a2 = b1 + 1; b2 = n - 1; //mezclando subvectores % y ~ mix(x, a1, b1, a2, b2); //Fin para 8 threads break; } default: { } } } //Mergesort con openmp int Mergesort_Omp(int *x, int n) { //Obteniendo numero de threads int num_threads = omp_get_max_threads(), i; //Obteniendo # de eltos a ordenar por cada thread int h = n / num_threads; //Indices del arreglo original de inicio y final de cada thread int *I_start = (int*)calloc(num_threads, sizeof(int)); int *I_end = 
(int*)calloc(num_threads, sizeof(int)); //Llenando el indice de arreglos. Lo llenamos de esta manera para //q el ultimo indice de final sea n-1 for (i = 0; i < num_threads - 1; i++) { I_start[i] = i * h; I_end[i] = I_start[i] + h - 1; } I_start[i] = i * h; I_end[i] = n - 1; //Ordenando los num_threads subarreglos #pragma omp parallel for for (i = 0; i < num_threads; i++) { //Obteniendo numero de thread int my_rank = omp_get_thread_num(); //Ordenando su segmento Mergesort(x, I_start[my_rank], I_end[my_rank]); } //Ordenando los subarreglos entre si mix_them(x, n, h, num_threads); //Liberando memoria free(I_start); free(I_end); return 0; } //end merge sort ------------------------------------------------------------------- int N_OMP_SORT_FUNCS = 8; typedef int(*omp_f) (int*, int); omp_f omp_funcs[] = { &omp_bubble_sort, &omp_odd_even_sort, &QuickSort, &RadixSort, &Mergesort_Omp,&bitonic, &omp_rank_sort, &omp_counting_sort }; const char *omp_funcs_names[] = { "omp_bubble_sort", "omp_odd_even_sort", "QuickSort", "RadixSort", "Mergesort_Omp", "bitonic", "omp_rank_sort", "omp_counting_sort" }; #endif
mxnet_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2017 by Contributors * \file mxnet_op.h * \brief * \author Junyuan Xie */ #ifndef MXNET_OPERATOR_MXNET_OP_H_ #define MXNET_OPERATOR_MXNET_OP_H_ #include <dmlc/omp.h> #include <mxnet/base.h> #include <mxnet/engine.h> #include <mxnet/op_attr_types.h> #include <algorithm> #include "./operator_tune.h" #include "../engine/openmp.h" #ifdef __CUDACC__ #include "../common/cuda_utils.h" #endif // __CUDACC__ namespace mxnet { namespace op { namespace mxnet_op { using namespace mshadow; #ifdef __CUDA_ARCH__ __constant__ const float PI = 3.14159265358979323846; #else const float PI = 3.14159265358979323846; using std::isnan; #endif template<typename xpu> int get_num_threads(const int N); #ifdef __CUDACC__ #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) inline cudaDeviceProp cuda_get_device_prop() { int device; CUDA_CALL(cudaGetDevice(&device)); cudaDeviceProp deviceProp; CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device)); return deviceProp; } /*! 
* \brief Get the number of blocks for cuda kernel given N */ inline int cuda_get_num_blocks(const int N) { using namespace mshadow::cuda; return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); } template<> inline int get_num_threads<gpu>(const int N) { using namespace mshadow::cuda; return kBaseThreadNum * cuda_get_num_blocks(N); } #endif // __CUDACC__ template<> inline int get_num_threads<cpu>(const int N) { return engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); } /*! \brief operator request type switch */ #define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } /*! \brief operator request type switch */ #define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ { \ const OpReqType ReqType = kNullOp; \ {__VA_ARGS__} \ } \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } #define MXNET_NDIM_SWITCH(NDim, ndim, ...) \ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ {__VA_ARGS__} \ } else if (NDim == 2) { \ const int ndim = 2; \ {__VA_ARGS__} \ } else if (NDim == 3) { \ const int ndim = 3; \ {__VA_ARGS__} \ } else if (NDim == 4) { \ const int ndim = 4; \ {__VA_ARGS__} \ } else if (NDim == 5) { \ const int ndim = 5; \ {__VA_ARGS__} \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } #define MXNET_NDIM_SWITCH_EX(NDim, ndim, ...) 
\ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ {__VA_ARGS__} \ } else if (NDim == 2) { \ const int ndim = 2; \ {__VA_ARGS__} \ } else if (NDim == 3) { \ const int ndim = 3; \ {__VA_ARGS__} \ } else if (NDim == 4) { \ const int ndim = 4; \ {__VA_ARGS__} \ } else if (NDim == 5) { \ const int ndim = 5; \ {__VA_ARGS__} \ } else if (NDim == 6) { \ const int ndim = 6; \ {__VA_ARGS__} \ } else if (NDim == 7) { \ const int ndim = 7; \ {__VA_ARGS__} \ } else if (NDim == 8) { \ const int ndim = 8; \ {__VA_ARGS__} \ } else if (NDim == 9) { \ const int ndim = 9; \ {__VA_ARGS__} \ } else if (NDim == 10) { \ const int ndim = 10; \ {__VA_ARGS__} \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } #define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ case mshadow::kBfloat16: \ { \ typedef mshadow::half::half_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) 
\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ LOG(FATAL) << "This operation does not " \ "support float16"; \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } template <typename T> struct AccType { using type = T; }; template <> struct AccType<mshadow::half::half_t> { using type = float; }; #define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ typedef float AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ typedef uint8_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types not uint8"; \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ typedef int8_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types not int8"; \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ typedef int32_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types, not int32"; \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ typedef int64_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types, not int64"; \ } \ break; \ case mshadow::kBool: \ { \ typedef bool DType; \ typedef int64_t AType; \ LOG(FATAL) << 
"This operation only support " \ "floating point types, not bool"; \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_ACC_TYPE_SWITCH(type, DType, AType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ typedef float AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ typedef uint32_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ typedef int32_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kBool: \ { \ typedef bool DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_INT_TYPE_SWITCH(type, DType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float32"; \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float64"; \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float16"; \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ case 
mshadow::kBool: \ { \ typedef bool DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_INT32_INT64_TYPE_SWITCH(type, DType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float32"; \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float64"; \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float16"; \ } \ break; \ case mshadow::kUint8: \ { \ LOG(FATAL) << "This operation only support " \ "integer types, not uint8"; \ } \ break; \ case mshadow::kInt8: \ { \ LOG(FATAL) << "This operation only support " \ "integer types, not int8"; \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kBool: \ { \ LOG(FATAL) << "This operation only support " \ "integer types, not bool"; \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_LOAD_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Invalid loading enum type " << type; \ } /*! 
* \brief assign the val to out according * to request in Kernel::Launch * \param out the data to be assigned * \param req the assignment request * \param val the value to be assigned to out * \tparam OType output type * \tparam VType value type */ #define KERNEL_ASSIGN(out, req, val) \ { \ switch (req) { \ case kNullOp: \ break; \ case kWriteTo: \ case kWriteInplace: \ (out) = (val); \ break; \ case kAddTo: \ (out) += (val); \ break; \ default: \ break; \ } \ } #define MXNET_ADD_ALL_TYPES \ .add_enum("float32", mshadow::kFloat32) \ .add_enum("float64", mshadow::kFloat64) \ .add_enum("float16", mshadow::kFloat16) \ .add_enum("bfloat16", mshadow::kBfloat16) \ .add_enum("uint8", mshadow::kUint8) \ .add_enum("int8", mshadow::kInt8) \ .add_enum("int32", mshadow::kInt32) \ .add_enum("int64", mshadow::kInt64) #define MXNET_ADD_ALL_TYPES_WITH_BOOL \ .add_enum("float32", mshadow::kFloat32) \ .add_enum("float64", mshadow::kFloat64) \ .add_enum("float16", mshadow::kFloat16) \ .add_enum("bfloat16", mshadow::kBfloat16) \ .add_enum("uint8", mshadow::kUint8) \ .add_enum("int8", mshadow::kInt8) \ .add_enum("int32", mshadow::kInt32) \ .add_enum("int64", mshadow::kInt64) \ .add_enum("bool", mshadow::kBool) /* \brief Compute flattened index given coordinates and shape. 
*/
// Row-major linearisation: the last axis varies fastest.  The
// (shape[i] > coord[i]) factor zeroes the contribution of any axis whose
// coordinate is out of range of its extent (used for broadcast axes of
// extent 1, where the coordinate must collapse to 0).
template<int ndim>
MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  index_t ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i];
  }
  return ret;
}

/* Compute coordinates from flattened index given shape */
// Inverse of ravel(): peel off the remainder modulo each extent, walking
// axes from last (fastest varying) to first.
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) {
  Shape<ndim> ret;
  #pragma unroll
  for (index_t i = ndim-1, j = idx; i >=0; --i) {
    auto tmp = j / shape[i];
    ret[i] = j - tmp*shape[i];  // == j % shape[i], reusing the quotient
    j = tmp;
  }
  return ret;
}

/* Compute dot product of two vector */
// Typically coord . stride, turning per-axis coordinates into a flat
// offset for a (possibly broadcast, see calc_stride) layout.
template<int ndim>
MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  index_t ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    ret += coord[i] * stride[i];
  }
  return ret;
}

/* Combining unravel and dot */
// Fused unravel(idx, shape) followed by dot(coord, stride), avoiding the
// intermediate Shape object.
template<int ndim>
MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape,
  const Shape<ndim>& stride) {
  index_t ret = 0;
  #pragma unroll
  for (index_t i = ndim-1, j = idx; i >=0; --i) {
    auto tmp = j / shape[i];
    ret += (j - tmp*shape[i])*stride[i];
    j = tmp;
  }
  return ret;
}

/* Calculate stride of each dim from shape */
// Row-major strides via a running product from the last axis; axes of
// extent <= 1 get stride 0 so that broadcasting falls out of dot()
// automatically.
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t cumprod = 1;
  #pragma unroll
  for (int i = ndim - 1; i >= 0; --i) {
    stride[i] = (shape[i] > 1) ? cumprod : 0;
    cumprod *= shape[i];
  }
  return stride;
}

/* Increment coordinates */
// Advance coord to the next position in row-major order, rippling carries
// from the last axis upward.  Returns false once the iteration wraps past
// the first axis (i.e. the whole shape has been visited).
template<int ndim>
MSHADOW_XINLINE bool inc(Shape<ndim>* coord, const Shape<ndim>& shape) {
  ++(*coord)[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
  }
  return (*coord)[0] < shape[0];
}

/* Increment coordinates and modify index */
// Same carry-ripple as inc() above, while keeping the flat offset *idx
// (for layout `stride`) in sync: each carry subtracts the full extent of
// the wrapped axis and adds one step of the next-slower axis.
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx, const Shape<ndim>& stride) {
  ++(*coord)[ndim-1];
  *idx += stride[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx = *idx + stride[i-1] - shape[i] * stride[i];
  }
}

/* Increment coordinates and modify index */
// Variant tracking two flat offsets at once (e.g. input and output
// tensors with different layouts) through a single coordinate walk.
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx1, const Shape<ndim>& stride1,
                         index_t* idx2, const Shape<ndim>& stride2) {
  ++(*coord)[ndim-1];
  *idx1 += stride1[ndim-1];
  *idx2 += stride2[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
    *idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
  }
}

/*!
 * \brief Simple copy data from one blob to another
 * \param to Destination blob
 * \param from Source blob
 */
// Sizes and device types must already match (checked); when the dtypes
// differ the source is cast element-wise via tcast before the copy.
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
  CHECK_EQ(from.Size(), to.Size());
  CHECK_EQ(from.dev_mask(), to.dev_mask());
  MSHADOW_TYPE_SWITCH_WITH_BOOL(to.type_flag_, DType, {
    if (to.type_flag_ == from.type_flag_) {
      mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s);
    } else {
      MSHADOW_TYPE_SWITCH_WITH_BOOL(from.type_flag_, SrcDType, {
        to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
      })
    }
  })
}

/*! 
\brief Binary op backward gradient OP wrapper */ template<typename GRAD_OP> struct backward_grad { /* \brief Backward calc with grad * \param a - output grad * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies) * \return input grad */ template<typename DType, typename ...Args> MSHADOW_XINLINE static DType Map(DType a, Args... args) { return DType(a * GRAD_OP::Map(args...)); } }; template<typename OP, int req> struct mixed_type_unary_op { typedef OP Operation; /*! \brief input is one tensor */ template<typename OType, typename IType> MSHADOW_XINLINE static void Map(index_t i, OType *out, const IType *in) { KERNEL_ASSIGN(out[i], req, OP::Map(OType(in[i]))); } }; /*! \brief Binary op backward gradient OP wrapper (tuned) */ template<typename GRAD_OP> struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable { using backward_grad<GRAD_OP>::Map; }; /*! \brief Select assignment operation based upon the req value * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch */ template<typename OP, int req> struct op_with_req { typedef OP Operation; /*! \brief input is one tensor */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i])); } /*! \brief inputs are two tensors */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief input is tensor and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value)); } /*! \brief input is tensor and two scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value_1, const DType value_2) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2)); } /*! 
\brief No inputs (ie fill to constant value) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out) { KERNEL_ASSIGN(out[i], req, OP::Map()); } /*! \brief input is single scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(value)); } /*! \brief inputs are two tensors and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *input_1, const DType *input_2, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value)); } /*! \brief inputs are three tensors (ie backward grad with binary grad function) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *input_1, const DType *input_2, const DType *input_3) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i])); } /*! \brief input is a tensor and the output is a boolean tensor */ template<typename DType, typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i])); } /*! \brief inputs are two tensors with a boolean output tensor */ template<typename DType, typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *lhs, const DType *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief input is tensor and two scalar value with a boolean output tensor */ template<typename DType, typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value)); } /*! 
\brief input is two tensors with different type and with a boolean output tensor */ template<typename LType, typename RType, typename std::enable_if<!std::is_same<LType, RType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, bool *out, const LType *lhs, const RType *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } #ifndef _WIN32 /*! \brief inputs are two tensors with a half_t output tensor */ template<typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, mshadow::half::half_t *out, const DType *lhs, const mshadow::half::half_t *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief inputs are two tensors with a float output tensor */ template<typename DType, typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value || std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const float *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief inputs are two tensors with a double output tensor */ template<typename DType, typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value || std::is_same<DType, float>::value || std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, double *out, const DType *lhs, const double *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief inputs are two tensors with a half_t output tensor */ template<typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, mshadow::half::half_t *out, const DType *lhs, const mshadow::half::half_t value) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value)); } /*! 
   \brief input is a (half_t or integral) tensor and a float scalar, with a float output tensor */
  template<typename DType,
           typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
                                   std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const float value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value));
  }

  /*! \brief input is a (half_t, float or integral) tensor and a double scalar, with a double output tensor */
  template<typename DType,
           typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
                                   std::is_same<DType, float>::value ||
                                   std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, double *out, const DType *lhs, const double value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value));
  }
#endif

  /*! \brief inputs are two integral tensors with a float output tensor */
  template<typename DType,
           typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is an integral tensor and a scalar value, with a float output tensor */
  template<typename DType,
           typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }
};

// Primary template; only the cpu and gpu specializations below are defined.
template<typename OP, typename xpu>
struct Kernel;

/*!
 * \brief CPU Kernel launcher
 * \tparam OP Operator to launch
 */
template<typename OP>
struct Kernel<OP, cpu> {
  /*!
   * \brief Launch a generic CPU kernel.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template<typename ...Args>
  inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      // Not worth the parallel-region overhead; run serially.
      for (size_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (size_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }

  /*!
   * \brief Launch a generic CPU kernel with dynamic schedule. This is recommended
   * for irregular workloads such as spmv.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template<typename ...Args>
  inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false);
    if (omp_threads < 2) {
      for (int64_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      // Dynamic schedule balances uneven per-iteration cost across threads.
#pragma omp parallel for num_threads(omp_threads) schedule(dynamic)
      for (int64_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (int64_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }

  /*!
   * \brief Launch CPU kernel which has OMP tuning data available.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam PRIMITIVE_OP The primitive operation to use for tuning
   * \tparam DType Data type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param dest Destination pointer (used to infer DType)
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename PRIMITIVE_OP, typename DType, typename ...Args>
  static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    // Consult the tuning data to decide whether OMP pays off for this N.
    if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP(
        N, static_cast<size_t>(omp_threads))) {
      for (size_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (size_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
  }

  /*!
   * \brief Launch custom-tuned kernel where each thread is set to
   * operate on a contiguous partition
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions
   */
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      // Single call handling the whole [0, N) range.
      OP::Map(0, N, args...);
    } else {
      // Split [0, N) into one contiguous chunk per thread; the last chunk
      // may be shorter when N is not a multiple of omp_threads.
      const auto length = (N + omp_threads - 1) / omp_threads;
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); i += length) {
        OP::Map(i, i + length > N ? N - i : length, args...);
      }
    }
#else
    OP::Map(0, N, args...);
#endif
  }

  /*!
   * \brief Launch a tunable OP with implicitly-supplied data type
   * \tparam DType Data type
   * \tparam T OP type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
    LaunchTuned<T, DType>(s, N, dest, args...);
    return true;
  }

  /*!
 * \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req)
 * \tparam DType Data type
 * \tparam T Wrapper type
 * \tparam Args Varargs type to eventually pass to the OP::Map() function
 * \param s Stream (usually null for CPU)
 * \param N Number of iterations
 * \param args Varargs to eventually pass to the OP::Map() function
 * \return Always true
 */
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
    // Tune on the wrapped operation rather than on the wrapper itself.
    LaunchTuned<typename T::Operation, DType>(s, N, dest, args...);
    return true;
  }
};

#ifdef __CUDACC__
// Grid-stride kernel: each thread handles indices i, i+stride, ... over [0, N).
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x;
       i < N;
       i += blockDim.x * gridDim.x) {
    OP::Map(i, args...);
  }
}

// Same as above, but calls the "extended" Map(i, length, ...) form with a
// length-1 segment per index.
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x;
       i < N;
       i += blockDim.x * gridDim.x) {
    OP::Map(i, 1, args...);
  }
}

/*! \brief GPU Kernel launcher */
template<typename OP>
struct Kernel<OP, gpu> {
  /*! \brief Launch GPU kernel */
  template<typename ...Args>
  inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
    if (0 == N) return;  // avoid launching an empty grid
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel<OP, Args...>
        <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
            N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel);
  }

  /*! \brief Launch GPU kernel calling the extended Map(i, length, ...) form */
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) {
    if (0 == N) return;
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel_ex<OP, Args...>
        <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
            N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex);
  }
};
#endif  // __CUDACC__

/*!
 * \brief Set to immediate scalar value kernel
 * \tparam val Scalar immediate
 */
template<int val>
struct set_to_int : public tunable {
  // mxnet_op version (when used directly with Kernel<>::Launch())
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    out[i] = DType(val);
  }
  // mshadow_op version (when used with op_with_req<>)
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};

/*!
 * \brief Special-case kernel shortcut for setting to zero and one
 */
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;

/*!
 * \brief Set to immediate scalar value kernel
 * \tparam val Scalar immediate
 */
template<bool val>
struct set_to_bool : public tunable {
  // mxnet_op version (when used directly with Kernel<>::Launch())
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    out[i] = DType(val);
  }
  // mshadow_op version (when used with op_with_req<>)
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};

/*!
 * \brief Special-case kernel shortcut for setting to true and false
 */
using set_true = set_to_bool<true>;
using set_false = set_to_bool<false>;
}  // namespace mxnet_op
}  // namespace op
}  // namespace mxnet
#endif  // MXNET_OPERATOR_MXNET_OP_H_
kmp_glt_csupport.c
/*
 * kmp_glt_csupport.c -- kfront linkage support for OpenMP.
 *
 * NOTE(review): the original header comment named this file
 * "kmp_abt_csupport.c"; renamed here to match the kmp_glt_*.h headers it
 * includes -- confirm against the build system.
 */

//===----------------------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//

#include "omp.h"        /* extern "C" declarations of user-visible routines */
#include "kmp_glt.h"
#include "kmp_glt_i18n.h"
#include "kmp_glt_error.h"
#include "kmp_glt_stats.h"

#define MAX_MESSAGE 512

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

/* flags will be used in future, e.g., to implement */
/* openmp_strict library restrictions */

/*!
 * @ingroup STARTUP_SHUTDOWN
 * @param loc   in   source location information
 * @param flags in   for future use (currently ignored)
 *
 * Initialize the runtime library. This call is optional; if it is not made then
 * it will be implicitly called by attempts to use other library functions.
 */
void
__kmpc_begin(ident_t *loc, kmp_int32 flags)
{
    // By default __kmp_ignore_mppbeg() returns TRUE.
    if (__kmp_ignore_mppbeg() == FALSE) {
        __kmp_internal_begin();
        KC_TRACE( 10, ("__kmpc_begin: called\n" ) );
    }
}

/*!
 * @ingroup STARTUP_SHUTDOWN
 * @param loc source location information
 *
 * Shutdown the runtime library. This is also optional, and even if called will not
 * do anything unless the `KMP_IGNORE_MPPEND` environment variable is set to zero.
 */
void
__kmpc_end(ident_t *loc)
{
    // By default, __kmp_ignore_mppend() returns TRUE which makes __kmpc_end() call no-op.
    // However, this can be overridden with KMP_IGNORE_MPPEND environment variable.
    // If KMP_IGNORE_MPPEND is 0, __kmp_ignore_mppend() returns FALSE and __kmpc_end()
    // will unregister this root (it can cause library shut down).
    if (__kmp_ignore_mppend() == FALSE) {
        KC_TRACE( 10, ("__kmpc_end: called\n" ) );
        KA_TRACE( 30, ("__kmpc_end\n" ));
        __kmp_internal_end_thread( -1 );
    }
}

/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The global thread index of the active thread.

This function can be called in any context.

If the runtime has only been entered at the outermost level from a
single (necessarily non-OpenMP<sup>*</sup>) thread, then the thread number is that
which would be returned by omp_get_thread_num() in the outermost
active parallel construct. (Or zero if there is no active parallel
construct, since the master thread is necessarily thread zero).

If multiple non-OpenMP threads all enter an OpenMP construct then this
will be a unique thread identifier among all the threads created by
the OpenMP runtime (but the value cannot be defined in terms of
OpenMP thread ids returned by omp_get_thread_num()).
*/
kmp_int32
__kmpc_global_thread_num(ident_t *loc)
{
    kmp_int32 gtid = __kmp_entry_gtid();

    KC_TRACE( 10, ("__kmpc_global_thread_num: T#%d\n", gtid ) );

    return gtid;
}

/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The number of threads under control of the OpenMP<sup>*</sup> runtime

This function can be called in any context.
It returns the total number of threads under the control of the OpenMP runtime. That is
not a number that can be determined by any OpenMP standard calls, since the library may be
called from more than one non-OpenMP thread, and this reflects the total over all such calls.
Similarly the runtime maintains underlying threads even when they are not active (since the cost
of creating and destroying OS threads is high), this call counts all such threads even if they are
not waiting for work.
*/
kmp_int32
__kmpc_global_num_threads(ident_t *loc)
{
    KC_TRACE( 10, ("__kmpc_global_num_threads: num_threads = %d\n", __kmp_global.nth ) );

    return TCR_4(__kmp_global.nth);
}

/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The thread number of the calling thread in the innermost active parallel construct.
*/
kmp_int32
__kmpc_bound_thread_num(ident_t *loc)
{
    KC_TRACE( 10, ("__kmpc_bound_thread_num: called\n" ) );
    return __kmp_tid_from_gtid( __kmp_entry_gtid() );
}

/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The number of threads in the innermost active parallel construct.
*/
kmp_int32
__kmpc_bound_num_threads(ident_t *loc)
{
    KC_TRACE( 10, ("__kmpc_bound_num_threads: called\n" ) );

    return __kmp_entry_thread() -> th.th_team -> t.t_nproc;
}

/*!
 * @ingroup DEPRECATED
 * @param loc location description
 *
 * This function need not be called. It always returns TRUE.
 */
kmp_int32
__kmpc_ok_to_fork(ident_t *loc)
{
#ifndef KMP_DEBUG

    return TRUE;

#else

    // Debug builds honour the KMP_PAR_RANGE filter: only fork for parallel
    // regions whose file / routine / line number match the configured range.
    const char *semi2;
    const char *semi3;
    int line_no;

    if (__kmp_par_range == 0) {
        return TRUE;
    }
    // loc->psource is expected to look like ";file;routine;line;...".
    semi2 = loc->psource;
    if (semi2 == NULL) {
        return TRUE;
    }
    semi2 = strchr(semi2, ';');
    if (semi2 == NULL) {
        return TRUE;
    }
    semi2 = strchr(semi2 + 1, ';');
    if (semi2 == NULL) {
        return TRUE;
    }
    if (__kmp_par_range_filename[0]) {
        // Compare only the basename of the source file.
        const char *name = semi2 - 1;
        while ((name > loc->psource) && (*name != '/') && (*name != ';')) {
            name--;
        }
        if ((*name == '/') || (*name == ';')) {
            name++;
        }
        if (strncmp(__kmp_par_range_filename, name, semi2 - name)) {
            return __kmp_par_range < 0;
        }
    }
    semi3 = strchr(semi2 + 1, ';');
    if (__kmp_par_range_routine[0]) {
        if ((semi3 != NULL) && (semi3 > semi2)
          && (strncmp(__kmp_par_range_routine, semi2 + 1, semi3 - semi2 - 1))) {
            return __kmp_par_range < 0;
        }
    }
    if (KMP_SSCANF(semi3 + 1, "%d", &line_no) == 1) {
        if ((line_no >= __kmp_par_range_lb) && (line_no <= __kmp_par_range_ub)) {
            return __kmp_par_range > 0;
        }
        return __kmp_par_range < 0;
    }
    return TRUE;

#endif /* KMP_DEBUG */

}

/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return 1 if this thread is executing inside an active parallel region, zero if not.
*/
kmp_int32
__kmpc_in_parallel( ident_t *loc )
{
    return __kmp_entry_thread() -> th.th_root -> r.r_active;
}

/*!
@ingroup PARALLEL
@param loc source location information
@param global_tid global thread number
@param num_threads number of threads requested for this parallel construct

Set the number of threads to be used by the next fork spawned by this thread.
This call is only required if the parallel construct has a `num_threads` clause.
*/
void
__kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_threads )
{
    KA_TRACE( 20, ("__kmpc_push_num_threads: enter T#%d num_threads=%d\n",
      global_tid, num_threads ) );

    __kmp_push_num_threads( loc, global_tid, num_threads );
}

/* Intentionally empty: the pushed num_threads value is consumed (popped)
 * automatically by the fork. */
void
__kmpc_pop_num_threads(ident_t *loc, kmp_int32 global_tid )
{
    KA_TRACE( 20, ("__kmpc_pop_num_threads: enter\n" ) );

    /* the num_threads are automatically popped */
}

#if OMP_40_ENABLED

/* Record the proc_bind policy for the next parallel region forked by this
 * thread (OpenMP 4.0 `proc_bind` clause). */
void
__kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid, kmp_int32 proc_bind )
{
    KA_TRACE( 20, ("__kmpc_push_proc_bind: enter T#%d proc_bind=%d\n",
      global_tid, proc_bind ) );

    __kmp_push_proc_bind( loc, global_tid, (kmp_proc_bind_t)proc_bind );
}

#endif /* OMP_40_ENABLED */

/*!
@ingroup PARALLEL
@param loc  source location information
@param argc  total number of arguments in the ellipsis
@param microtask  pointer to callback routine consisting of outlined parallel construct
@param ...  pointers to shared variables that aren't global

Do the actual fork and call the microtask in the relevant number of threads.
*/
void
__kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...)
{
    int gtid = __kmp_entry_gtid();
    // maybe to save thr_state is enough here
    {
        va_list ap;
        va_start( ap, microtask );

#ifdef KMP_GLT_USE_TASKLET_TEAM
        kmp_info_t *this_thr = __kmp_global.threads[ gtid ];
        // Tasklet-team path: run the region as a tasklet team instead of a
        // full fork/join when the flag is set (it is consumed here).
        if (get__tasklet(this_thr)) {
            set__tasklet(this_thr,FTN_FALSE);
            __kmp_fork_join_tasklet_team( loc, gtid, fork_context_intel, argc,
                    VOLATILE_CAST(microtask_t) microtask,   // "wrapped" task
                    VOLATILE_CAST(launch_t)    __kmp_invoke_task_func,
/* TODO: revert workaround for Intel(R) 64 tracker #96 */
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
                    &ap
#else
                    ap
#endif
                    );
        } else {
#endif
#if INCLUDE_SSC_MARKS
            SSC_MARK_FORKING();
#endif
            __kmp_fork_call( loc, gtid, fork_context_intel, argc,
                    VOLATILE_CAST(microtask_t) microtask,   // "wrapped" task
                    VOLATILE_CAST(launch_t)    __kmp_invoke_task_func,
/* TODO: revert workaround for Intel(R) 64 tracker #96 */
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
                    &ap
#else
                    ap
#endif
                    );
#if INCLUDE_SSC_MARKS
            SSC_MARK_JOINING();
#endif
            __kmp_join_call( loc, gtid );
#ifdef KMP_GLT_USE_TASKLET_TEAM
        }
#endif
        va_end( ap );
    }
}

#if OMP_40_ENABLED
/*!
@ingroup PARALLEL
@param loc source location information
@param global_tid global thread number
@param num_teams number of teams requested for the teams construct
@param num_threads number of threads per team requested for the teams construct

Set the number of teams to be used by the teams construct.
This call is only required if the teams construct has a `num_teams` clause
or a `thread_limit` clause (or both).
*/
void
__kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_teams, kmp_int32 num_threads )
{
    KA_TRACE( 20, ("__kmpc_push_num_teams: enter T#%d num_teams=%d num_threads=%d\n",
      global_tid, num_teams, num_threads ) );

    __kmp_push_num_teams( loc, global_tid, num_teams, num_threads );
}

/*!
@ingroup PARALLEL
@param loc  source location information
@param argc  total number of arguments in the ellipsis
@param microtask  pointer to callback routine consisting of outlined teams construct
@param ...  pointers to shared variables that aren't global

Do the actual fork and call the microtask in the relevant number of threads.
*/
void
__kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...)
{
    int gtid = __kmp_entry_gtid();
    kmp_info_t *this_thr = __kmp_global.threads[ gtid ];
    va_list ap;
    va_start( ap, microtask );

    KMP_COUNT_BLOCK(OMP_TEAMS);

    // remember teams entry point and nesting level
    this_thr->th.th_teams_microtask = microtask;
    this_thr->th.th_teams_level = this_thr->th.th_team->t.t_level; // AC: can be >0 on host

    // check if __kmpc_push_num_teams called, set default number of teams otherwise
    if ( this_thr->th.th_teams_size.nteams == 0 ) {
        __kmp_push_num_teams( loc, gtid, 0, 0 );
    }
    KMP_DEBUG_ASSERT(this_thr->th.th_set_nproc >= 1);
    KMP_DEBUG_ASSERT(this_thr->th.th_teams_size.nteams >= 1);
    KMP_DEBUG_ASSERT(this_thr->th.th_teams_size.nth >= 1);

    __kmp_fork_call( loc, gtid, fork_context_intel, argc,
            VOLATILE_CAST(microtask_t) __kmp_teams_master, // "wrapped" task
            VOLATILE_CAST(launch_t)    __kmp_invoke_teams_master,
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
            &ap
#else
            ap
#endif
            );
    __kmp_join_call( loc, gtid );

    // Reset the teams bookkeeping recorded above; th_teams_size is cleared
    // as a single 64-bit store.
    this_thr->th.th_teams_microtask = NULL;
    this_thr->th.th_teams_level = 0;
    *(kmp_int64*)(&this_thr->th.th_teams_size) = 0L;
    va_end( ap );
}
#endif /* OMP_40_ENABLED */

//
// I don't think this function should ever have been exported.
// The __kmpc_ prefix was misapplied.
// I'm fairly certain that no generated
// openmp code ever called it, but it's been exported from the RTL for so
// long that I'm afraid to remove the definition.
//
int
__kmpc_invoke_task_func( int gtid )
{
    return __kmp_invoke_task_func( gtid );
}

/*!
@ingroup PARALLEL
@param loc  source location information
@param global_tid  global thread number

Enter a serialized parallel construct. This interface is used to handle a
conditional parallel region, like this,
@code
#pragma omp parallel if (condition)
@endcode
when the condition is false.
*/
void
__kmpc_serialized_parallel(ident_t *loc, kmp_int32 global_tid)
{
    __kmp_serialized_parallel(loc, global_tid); /* The implementation is now in kmp_runtime.c so that it can share static functions with
                                                 * kmp_fork_call since the tasks to be done are similar in each case.
                                                 */
}

/*!
@ingroup PARALLEL
@param loc  source location information
@param global_tid  global thread number

Leave a serialized parallel construct.
*/
void
__kmpc_end_serialized_parallel(ident_t *loc, kmp_int32 global_tid)
{
    kmp_internal_control_t *top;
    kmp_info_t *this_thr;
    kmp_team_t *serial_team;

    KC_TRACE( 10, ("__kmpc_end_serialized_parallel: called by T#%d\n", global_tid ) );

    /* skip all this code for autopar serialized loops since it results in
       unacceptable overhead */
    if( loc != NULL && (loc->flags & KMP_IDENT_AUTOPAR ) )
        return;

    // Not autopar code
    if( ! TCR_4( __kmp_global.init_parallel ) )
        __kmp_parallel_initialize();

    this_thr    = __kmp_global.threads[ global_tid ];
    serial_team = this_thr->th.th_serial_team;

#if OMP_41_ENABLED
    kmp_task_team_t * task_team = this_thr->th.th_task_team;

    // we need to wait for the proxy tasks before finishing the thread
    if ( task_team != NULL && task_team->tt.tt_found_proxy_tasks )
        __kmp_task_team_wait(this_thr, serial_team, 1);
#endif

    KMP_MB();
    KMP_DEBUG_ASSERT( serial_team );
    KMP_ASSERT(       serial_team -> t.t_serialized );
    KMP_DEBUG_ASSERT( this_thr -> th.th_team == serial_team );
    KMP_DEBUG_ASSERT( serial_team != this_thr->th.th_root->r.r_root_team );
    KMP_DEBUG_ASSERT( serial_team -> t.t_threads );
    KMP_DEBUG_ASSERT( serial_team -> t.t_threads[0] == this_thr );

    /* If necessary, pop the internal control stack values and replace the team values */
    top = serial_team -> t.t_control_stack_top;
    if ( top && top -> serial_nesting_level == serial_team -> t.t_serialized ) {
        copy_icvs( &serial_team -> t.t_threads[0] -> th.th_current_task -> td_icvs, top );
        serial_team -> t.t_control_stack_top = top -> next;
        __kmp_free(top);
    }

    //if( serial_team -> t.t_serialized > 1 )
    serial_team -> t.t_level--;

    /* pop dispatch buffers stack */
    KMP_DEBUG_ASSERT(serial_team->t.t_dispatch->th_disp_buffer);
    {
        dispatch_private_info_t * disp_buffer = serial_team->t.t_dispatch->th_disp_buffer;
        serial_team->t.t_dispatch->th_disp_buffer =
            serial_team->t.t_dispatch->th_disp_buffer->next;
        __kmp_free( disp_buffer );
    }

    -- serial_team -> t.t_serialized;
    if ( serial_team -> t.t_serialized == 0 ) {

        /* return to the parallel section */

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
        // Restore the floating-point control state saved when the serialized
        // region was entered.
        if ( __kmp_global.inherit_fp_control && serial_team->t.t_fp_control_saved ) {
            __kmp_clear_x87_fpu_status_word();
            __kmp_load_x87_fpu_control_word( &serial_team->t.t_x87_fpu_control_word );
            __kmp_load_mxcsr( &serial_team->t.t_mxcsr );
        }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

        this_thr -> th.th_team           = serial_team -> t.t_parent;
        this_thr -> th.th_info.ds.ds_tid = serial_team -> t.t_master_tid;

        /* restore values cached in the thread */
        this_thr -> th.th_team_nproc     = serial_team -> t.t_parent -> t.t_nproc;          /* JPH */
        this_thr -> th.th_team_master    = serial_team -> t.t_parent -> t.t_threads[0];     /* JPH */
        this_thr -> th.th_team_serialized = this_thr -> th.th_team -> t.t_serialized;

        /* TODO the below shouldn't need to be adjusted for serialized teams */
        this_thr -> th.th_dispatch       = & this_thr -> th.th_team ->
            t.t_dispatch[ serial_team -> t.t_master_tid ];

        __kmp_pop_current_task_from_thread( this_thr );

        KMP_ASSERT( this_thr -> th.th_current_task -> td_flags.executing == 0 );
        this_thr -> th.th_current_task -> td_flags.executing = 1;

        if ( __kmp_global.tasking_mode != tskm_immediate_exec ) {
            // Copy the task team from the new child / old parent team to the thread.
            this_thr->th.th_task_team = this_thr->th.th_team->t.t_task_team[this_thr->th.th_task_state];
            KA_TRACE( 20, ( "__kmpc_end_serialized_parallel: T#%d restoring task_team %p / team %p\n",
                            global_tid, this_thr -> th.th_task_team, this_thr -> th.th_team ) );
        }
    } else {
        if ( __kmp_global.tasking_mode != tskm_immediate_exec ) {
            KA_TRACE( 20, ( "__kmpc_end_serialized_parallel: T#%d decreasing nesting depth of serial team %p to %d\n",
                            global_tid, serial_team, serial_team -> t.t_serialized ) );
        }
    }

    if ( __kmp_global.env_consistency_check )
        __kmp_pop_parallel( global_tid, NULL );
}

/*!
@ingroup SYNCHRONIZATION
@param loc  source location information.

Execute <tt>flush</tt>. This is implemented as a full memory fence. (Though
depending on the memory ordering convention obeyed by the compiler
even that may not be necessary).
*/
void
__kmpc_flush(ident_t *loc)
{
    KC_TRACE( 10, ("__kmpc_flush: called\n" ) );

    /* need explicit __mf() here since use volatile instead in library */
    KMP_MB();       /* Flush all pending memory write invalidates.  */

#if ( KMP_ARCH_X86 || KMP_ARCH_X86_64 )
#if KMP_MIC
    // fence-style instructions do not exist, but lock; xaddl $0,(%rsp) can be used.
    // We shouldn't need it, though, since the ABI rules require that
    // * If the compiler generates NGO stores it also generates the fence
    // * If users hand-code NGO stores they should insert the fence
    // therefore no incomplete unordered stores should be visible.
#else
    // C74404
    // This is to address non-temporal store instructions (sfence needed).
    // The clflush instruction is addressed either (mfence needed).
    // Probably the non-temporal load monvtdqa instruction should also be addressed.
    // mfence is a SSE2 instruction. Do not execute it if CPU is not SSE2.
    // NOTE(review): stray ';' after the closing braces below (harmless).
    if ( ! __kmp_global.cpuinfo.initialized ) {
        __kmp_query_cpuid( & __kmp_global.cpuinfo );
    }; // if
    if ( ! __kmp_global.cpuinfo.sse2 ) {
        // CPU cannot execute SSE2 instructions.
    } else {
#if KMP_COMPILER_ICC || KMP_COMPILER_MSVC
        _mm_mfence();
#else
        __sync_synchronize();
#endif // KMP_COMPILER_ICC
    }; // if
#endif // KMP_MIC
#elif (KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    // Nothing to see here move along
#elif KMP_ARCH_PPC64
    // Nothing needed here (we have a real MB above).
#if KMP_OS_CNK
    // The flushing thread needs to yield here; this prevents a
    // busy-waiting thread from saturating the pipeline. flush is
    // often used in loops like this:
    // while (!flag) {
    //   #pragma omp flush(flag)
    // }
    // and adding the yield here is good for at least a 10x speedup
    // when running >2 threads per core (on the NAS LU benchmark).
    __kmp_yield(TRUE);
#endif
#else
#error Unknown or unsupported architecture
#endif
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

/* Hash a critical-section name into one of the KMP_NUM_CRIT_LOCKS
 * process-global lock slots. */
static __forceinline int
__kmp_global_get_crit_lock_id( kmp_critical_name *crit )
{
    unsigned id = *(unsigned *)crit;
    return ((id & (id >> 16)) % KMP_NUM_CRIT_LOCKS);
}

/* Acquire the process-global lock slot associated with 'crit'. */
static __forceinline void
__kmp_global_enter_critical_section( kmp_int32 global_tid, kmp_critical_name * crit )
{
    int lock_id = __kmp_global_get_crit_lock_id( crit );
    __kmp_acquire_lock( &__kmp_global.crit_lock[lock_id], global_tid );
    KA_TRACE( 20, ( "__kmp_global_enter_critical_section(): T#%d: acquired lock#%d\n",
                    global_tid, lock_id ) );
}

/* Release the process-global lock slot associated with 'crit'. */
static __forceinline void
__kmp_global_end_critical_section( kmp_int32 global_tid, kmp_critical_name * crit )
{
    int lock_id = __kmp_global_get_crit_lock_id( crit );
    __kmp_release_lock( &__kmp_global.crit_lock[lock_id], global_tid );
    KA_TRACE( 20, ( "__kmp_global_end_critical_section(): T#%d: released lock#%d\n",
                    global_tid, lock_id ) );
}

/* Hash a critical-section name into one of the KMP_TEAM_NUM_LOCKS
 * per-team lock slots. */
static __forceinline int
__kmp_team_get_lock_id( kmp_critical_name *crit )
{
    unsigned id = *(unsigned *)crit;
    return ((id & (id >> 16)) % KMP_TEAM_NUM_LOCKS);
}

/* Acquire the current team's lock slot associated with 'crit'. */
static __forceinline void
__kmp_team_enter_critical_section( kmp_int32 global_tid, kmp_critical_name * crit )
{
    int lock_id = __kmp_team_get_lock_id( crit );
    kmp_team_t *team = __kmp_team_from_gtid( global_tid );
    __kmp_acquire_lock( &team->t.t_lock[lock_id], global_tid );
    KA_TRACE( 20, ( "__kmp_team_enter_critical_section(): team %d (T#%d): acquired lock#%d\n",
                    team->t.t_id, global_tid, lock_id ) );
}

/* Release the current team's lock slot associated with 'crit'. */
static __forceinline void
__kmp_team_end_critical_section( kmp_int32 global_tid, kmp_critical_name * crit )
{
    int lock_id = __kmp_team_get_lock_id( crit );
    kmp_team_t *team = __kmp_team_from_gtid( global_tid );
    __kmp_release_lock( &team->t.t_lock[lock_id], global_tid );
    KA_TRACE( 20, ( "__kmp_team_end_critical_section(): team %d (T#%d): released lock#%d\n",
                    team->t.t_id, global_tid, lock_id ) );
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.

Execute a barrier.
*/
void
__kmpc_barrier(ident_t *loc, kmp_int32 global_tid)
{
    KMP_COUNT_BLOCK(OMP_BARRIER);
    KMP_TIME_BLOCK(OMP_barrier);
    KC_TRACE( 10, ("__kmpc_barrier: called T#%d\n", global_tid ) );

    if (! TCR_4(__kmp_global.init_parallel))
        __kmp_parallel_initialize();

    if ( __kmp_global.env_consistency_check ) {
        if ( loc == 0 ) {
            KMP_WARNING( ConstructIdentInvalid ); // ??? What does it mean for the user?
        }; // if

        __kmp_check_barrier( global_tid, ct_barrier, loc );
    }

    __kmp_global.threads[ global_tid ]->th.th_ident = loc;
    // TODO: explicit barrier_wait_id:
    //   this function is called when 'barrier' directive is present or
    //   implicit barrier at the end of a worksharing construct.
    // 1) better to add a per-thread barrier counter to a thread data structure
    // 2) set to 0 when a new team is created
    // 4) no sync is required

    __kmp_barrier( global_tid );
}

/* The BARRIER for a MASTER section is always explicit   */
/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param global_tid  global thread number .
@return 1 if this thread should execute the <tt>master</tt> block, 0 otherwise.
*/
kmp_int32
__kmpc_master(ident_t *loc, kmp_int32 global_tid)
{
    int status = 0;
    kmp_info_t *th;
    th = __kmp_global.threads[ global_tid ];

    KC_TRACE( 10, ("__kmpc_master: called T#%d\n", global_tid ) );

    if( ! TCR_4( __kmp_global.init_parallel ) )
        __kmp_parallel_initialize();

    if( KMP_MASTER_GTID( global_tid )) {
        KMP_COUNT_BLOCK(OMP_MASTER);
        KMP_START_EXPLICIT_TIMER(OMP_master);
        status = 1;
        th->th.th_single_or_master = status;
    }

    if ( __kmp_global.env_consistency_check ) {
        if (status)
            __kmp_push_sync( global_tid, ct_master, loc, NULL );
        else
            __kmp_check_sync( global_tid, ct_master, loc, NULL );
    }

    return status;
}

/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param global_tid  global thread number .

Mark the end of a <tt>master</tt> region. This should only be called by the thread
that executes the <tt>master</tt> region.
*/
void
__kmpc_end_master(ident_t *loc, kmp_int32 global_tid)
{
    KC_TRACE( 10, ("__kmpc_end_master: called T#%d\n", global_tid ) );

    KMP_DEBUG_ASSERT( KMP_MASTER_GTID( global_tid ));
    KMP_STOP_EXPLICIT_TIMER(OMP_master);

    if ( __kmp_global.env_consistency_check ) {
        if( global_tid < 0 )
            KMP_WARNING( ThreadIdentInvalid );

        if( KMP_MASTER_GTID( global_tid ))
            __kmp_pop_sync( global_tid, ct_master, loc );
    }
    kmp_info_t *th;
    th = __kmp_global.threads[ global_tid ];
    th->th.th_single_or_master = 0;
}

/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param gtid  global thread number.

Start execution of an <tt>ordered</tt> construct.
*/
void
__kmpc_ordered( ident_t * loc, kmp_int32 gtid )
{
    int cid = 0;
    kmp_info_t *th;

    KMP_DEBUG_ASSERT( __kmp_global.init_serial );

    KC_TRACE( 10, ("__kmpc_ordered: called T#%d\n", gtid ));

    if (! TCR_4(__kmp_global.init_parallel))
        __kmp_parallel_initialize();

    th = __kmp_global.threads[ gtid ];

    // Dispatch may provide its own "deo" (dispatch-enter-ordered) hook.
    if ( th -> th.th_dispatch -> th_deo_fcn != 0 )
        (*th->th.th_dispatch->th_deo_fcn)( & gtid, & cid, loc );
    else
        __kmp_parallel_deo( & gtid, & cid, loc );
}

/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param gtid  global thread number.

End execution of an <tt>ordered</tt> construct.
*/
void
__kmpc_end_ordered( ident_t * loc, kmp_int32 gtid )
{
    int cid = 0;
    kmp_info_t *th;

    KC_TRACE( 10, ("__kmpc_end_ordered: called T#%d\n", gtid ) );

    th = __kmp_global.threads[ gtid ];

    // Dispatch may provide its own "dxo" (dispatch-exit-ordered) hook.
    if ( th -> th.th_dispatch -> th_dxo_fcn != 0 )
        (*th->th.th_dispatch->th_dxo_fcn)( & gtid, & cid, loc );
    else
        __kmp_parallel_dxo( & gtid, & cid, loc );
}

/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param global_tid  global thread number .
@param crit identity of the critical section. This could be a pointer to a lock associated with the critical section, or
some other suitably unique value.

Enter code protected by a `critical` construct.
This function blocks until the executing thread can enter the critical section.
*/
void
__kmpc_critical( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit )
{
    KMP_COUNT_BLOCK(OMP_CRITICAL);

    KC_TRACE( 10, ("__kmpc_critical: called T#%d\n", global_tid ) );

    /* since the critical directive binds to all threads, not just
     * the current team we have to check this even if we are in a
     * serialized team */
    /* also, even if we are the uber thread, we still have to conduct the lock,
     * as we have to contend with sibling threads */

    // Value of 'crit' should be good for using as a critical_id of the critical section directive.
    __kmp_global_enter_critical_section( global_tid, crit );

    KA_TRACE( 15, ("__kmpc_critical: done T#%d\n", global_tid ));
}

/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param global_tid  global thread number .
@param crit identity of the critical section. This could be a pointer to a lock associated with the critical section, or
some other suitably unique value.

Leave a critical section, releasing any lock that was held during its execution.
*/
void
__kmpc_end_critical(ident_t *loc, kmp_int32 global_tid, kmp_critical_name *crit)
{
    KC_TRACE( 10, ("__kmpc_end_critical: called T#%d\n", global_tid ));

    // Value of 'crit' should be good for using as a critical_id of the critical section directive.
    __kmp_global_end_critical_section( global_tid, crit );

    KA_TRACE( 15, ("__kmpc_end_critical: done T#%d\n", global_tid ));
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.
@return one if the thread should execute the master block, zero otherwise

Start execution of a combined barrier and master. The barrier is executed inside this function.
*/
kmp_int32
__kmpc_barrier_master(ident_t *loc, kmp_int32 global_tid)
{
    int status;

    KC_TRACE( 10, ("__kmpc_barrier_master: called T#%d\n", global_tid ) );

    if (! TCR_4(__kmp_global.init_parallel))
        __kmp_parallel_initialize();

    if ( __kmp_global.env_consistency_check )
        __kmp_check_barrier( global_tid, ct_barrier, loc );

    // Split barrier: returns nonzero for the workers, zero for the master,
    // hence the inversion below so the master gets 1.
    status = __kmp_begin_split_barrier( global_tid );

    return (status != 0) ? 0 : 1;
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.

Complete the execution of a combined barrier and master. This function should
only be called at the completion of the <tt>master</tt> code. Other threads will
still be waiting at the barrier and this call releases them.
*/
void
__kmpc_end_barrier_master(ident_t *loc, kmp_int32 global_tid)
{
    KC_TRACE( 10, ("__kmpc_end_barrier_master: called T#%d\n", global_tid ));

    __kmp_end_split_barrier ( global_tid );
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.
@return one if the thread should execute the master block, zero otherwise

Start execution of a combined barrier and master(nowait) construct.
The barrier is executed inside this function.
There is no equivalent "end" function, since the
*/
kmp_int32
__kmpc_barrier_master_nowait( ident_t * loc, kmp_int32 global_tid )
{
    kmp_int32 ret;

    KC_TRACE( 10, ("__kmpc_barrier_master_nowait: called T#%d\n", global_tid ));

    if (! TCR_4(__kmp_global.init_parallel))
        __kmp_parallel_initialize();

    if ( __kmp_global.env_consistency_check ) {
        if ( loc == 0 ) {
            KMP_WARNING( ConstructIdentInvalid ); // ??? What does it mean for the user?
        }
        __kmp_check_barrier( global_tid, ct_barrier, loc );
    }

    // Full barrier first, then elect the master to run the block.
    __kmp_barrier( global_tid );

    ret = __kmpc_master (loc, global_tid);

    if ( __kmp_global.env_consistency_check ) {
        /*  there's no __kmpc_end_master called; so the (stats) */
        /*  actions of __kmpc_end_master are done here          */

        if ( global_tid < 0 ) {
            KMP_WARNING( ThreadIdentInvalid );
        }
        if (ret) {
            /* only one thread should do the pop since only */
            /* one did the push (see __kmpc_master())       */

            __kmp_pop_sync( global_tid, ct_master, loc );
        }
    }

    return (ret);
}

/* The BARRIER for a SINGLE process section is always explicit   */
/*!
@ingroup WORK_SHARING
@param loc  source location information
@param global_tid  global thread number
@return One if this thread should execute the single construct, zero otherwise.

Test whether to execute a <tt>single</tt> construct.
There are no implicit barriers in the two "single" calls, rather the compiler should
introduce an explicit barrier if it is required.
*/
kmp_int32
__kmpc_single(ident_t *loc, kmp_int32 global_tid)
{
    kmp_int32 rc = __kmp_enter_single( global_tid, loc, TRUE );
    if (rc) {
        // We are going to execute the single statement, so we should count it.
        KMP_COUNT_BLOCK(OMP_SINGLE);
        KMP_START_EXPLICIT_TIMER(OMP_single);
    }
    return rc;
}

/*!
@ingroup WORK_SHARING
@param loc  source location information
@param global_tid  global thread number

Mark the end of a <tt>single</tt> construct.  This function should
only be called by the thread that executed the block of code protected
by the `single` construct.
*/
void
__kmpc_end_single(ident_t *loc, kmp_int32 global_tid)
{
    __kmp_exit_single( global_tid );
    KMP_STOP_EXPLICIT_TIMER(OMP_single);
}

/*!
@ingroup WORK_SHARING
@param loc Source location
@param global_tid Global thread id

Mark the end of a statically scheduled loop.
*/
void
__kmpc_for_static_fini( ident_t *loc, kmp_int32 global_tid )
{
    KE_TRACE( 10, ("__kmpc_for_static_fini called T#%d\n", global_tid));

    if ( __kmp_global.env_consistency_check )
        __kmp_pop_workshare( global_tid, ct_pdo, loc );
}

/*
 * User routines which take C-style arguments (call by value)
 * different from the Fortran equivalent routines
 */
void
ompc_set_num_threads( int arg )
{
    // !!!!! TODO: check the per-task binding
    __kmp_set_num_threads( arg, __kmp_entry_gtid() );
}

void
ompc_set_dynamic( int flag )
{
    kmp_info_t *thread;

    /* For the thread-private implementation of the internal controls */
    thread = __kmp_entry_thread();

    __kmp_save_internal_controls( thread );

    set__dynamic( thread, flag ? TRUE : FALSE );
}

void
ompc_set_nested( int flag )
{
    kmp_info_t *thread;

    /* For the thread-private internal controls implementation */
    thread = __kmp_entry_thread();

    __kmp_save_internal_controls( thread );

    set__nested( thread, flag ? TRUE : FALSE );
}

void
ompc_set_max_active_levels( int max_active_levels )
{
    /* TO DO */
    /* we want per-task implementation of this internal control */

    /* For the per-thread internal controls implementation */
    __kmp_set_max_active_levels( __kmp_entry_gtid(), max_active_levels );
}

void
ompc_set_schedule( omp_sched_t kind, int modifier )
{
    // !!!!! TODO: check the per-task binding
    __kmp_set_schedule( __kmp_entry_gtid(), ( kmp_sched_t ) kind, modifier );
}

int
ompc_get_ancestor_thread_num( int level )
{
    return __kmp_get_ancestor_thread_num( __kmp_entry_gtid(), level );
}

int
ompc_get_team_size( int level )
{
    return __kmp_get_team_size( __kmp_entry_gtid(), level );
}

void
kmpc_set_stacksize( int arg )
{
    // __kmp_aux_set_stacksize initializes the library if needed
    __kmp_aux_set_stacksize( arg );
}

void
kmpc_set_stacksize_s( size_t arg )
{
    // __kmp_aux_set_stacksize initializes the library if needed
    __kmp_aux_set_stacksize( arg );
}

void
kmpc_set_blocktime( int arg )
{
    int gtid, tid;
    kmp_info_t *thread;

    gtid = __kmp_entry_gtid();
    tid = __kmp_tid_from_gtid(gtid);
    thread = __kmp_thread_from_gtid(gtid);

    __kmp_aux_set_blocktime( arg, thread, tid );
}

void
kmpc_set_library( int arg )
{
    // __kmp_user_set_library initializes the library if needed
    __kmp_user_set_library( (enum library_type)arg );
}

void
kmpc_set_defaults( char const * str )
{
    // __kmp_aux_set_defaults initializes the library if needed
    __kmp_aux_set_defaults( str, KMP_STRLEN( str ) );
}

int
kmpc_set_affinity_mask_proc( int proc, void **mask )
{
    // Affinity is a no-op for stub builds or platforms without affinity support.
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
    return -1;
#else
    if ( ! TCR_4(__kmp_init_middle) ) {
        __kmp_middle_initialize();
    }
    return __kmp_aux_set_affinity_mask_proc( proc, mask );
#endif
}

int
kmpc_unset_affinity_mask_proc( int proc, void **mask )
{
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
    return -1;
#else
    if ( ! TCR_4(__kmp_init_middle) ) {
        __kmp_middle_initialize();
    }
    return __kmp_aux_unset_affinity_mask_proc( proc, mask );
#endif
}

int
kmpc_get_affinity_mask_proc( int proc, void **mask )
{
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
    return -1;
#else
    if ( ! TCR_4(__kmp_init_middle) ) {
        __kmp_middle_initialize();
    }
    return __kmp_aux_get_affinity_mask_proc( proc, mask );
#endif
}

/* -------------------------------------------------------------------------- */

/*!
@ingroup THREADPRIVATE
@param loc       source location information
@param gtid      global thread number
@param cpy_size  size of the cpy_data buffer
@param cpy_data  pointer to data to be copied
@param cpy_func  helper function to call for copying data
@param didit     flag variable: 1=single thread; 0=not single thread

__kmpc_copyprivate implements the interface for the private data broadcast needed for
the copyprivate clause associated with a single region in an OpenMP<sup>*</sup> program (both C and Fortran).
All threads participating in the parallel region call this routine.
One of the threads (called the single thread) should have the <tt>didit</tt> variable set to 1
and all other threads should have that variable set to 0.
All threads pass a pointer to a data buffer (cpy_data) that they have built.

The OpenMP specification forbids the use of nowait on the single region when a copyprivate
clause is present. However, @ref __kmpc_copyprivate implements a barrier internally to avoid
race conditions, so the code generation for the single region should avoid generating a barrier
after the call to @ref __kmpc_copyprivate.

The <tt>gtid</tt> parameter is the global thread id for the current thread.
The <tt>loc</tt> parameter is a pointer to source location information.

Internal implementation: The single thread will first copy its descriptor address (cpy_data)
to a team-private location, then the other threads will each call the function pointed to by
the parameter cpy_func, which carries out the copy by copying the data using the cpy_data
buffer.

The cpy_func routine used for the copy and the contents of the data area defined by cpy_data
and cpy_size may be built in any fashion that will allow the copy to be done. For instance,
the cpy_data buffer can hold the actual data to be copied or it may hold a list of pointers
to the data. The cpy_func routine must interpret the cpy_data buffer appropriately.

The interface to cpy_func is as follows:
@code
void cpy_func( void *destination, void *source )
@endcode
where void *destination is the cpy_data pointer for the thread being copied to
and void *source is the cpy_data pointer for the thread being copied from.
*/
void
__kmpc_copyprivate( ident_t *loc, kmp_int32 gtid, size_t cpy_size, void *cpy_data, void(*cpy_func)(void*,void*), kmp_int32 didit )
{
    void **data_ptr;

    KC_TRACE( 10, ("__kmpc_copyprivate: called T#%d\n", gtid ));

    KMP_MB();

    // Team-private slot used to publish the single thread's buffer address.
    data_ptr = & __kmp_team_from_gtid( gtid )->t.t_copypriv_data;

    if ( __kmp_global.env_consistency_check ) {
        if ( loc == 0 ) {
            KMP_WARNING( ConstructIdentInvalid );
        }
    }

    /* ToDo: Optimize the following two barriers into some kind of split barrier */

    if (didit) *data_ptr = cpy_data;

    /* This barrier is not a barrier region boundary */
    __kmp_barrier( gtid );

    if (! didit) (*cpy_func)( cpy_data, *data_ptr );

    /* Consider next barrier the user-visible barrier for barrier region boundaries */
    /* Nesting checks are already handled by the single construct checks */
    __kmp_barrier( gtid );
}

/* -------------------------------------------------------------------------- */

/*
 * TODO: Make check abort messages use location info & pass it
 * into with_checks routines
 */

/* initialize the lock */
void
__kmpc_init_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock )
{
    static char const * const func = "omp_init_lock";
    kmp_lock_t lck;
    KMP_DEBUG_ASSERT( __kmp_global.init_serial );

    if ( __kmp_global.env_consistency_check ) {
        if ( user_lock == NULL ) {
            KMP_FATAL( LockIsUninitialized, func );
        }
    }

    __kmp_init_lock( &lck );
    *(kmp_lock_t *)user_lock = lck;
} // __kmpc_init_lock

/* initialize the lock */
void
__kmpc_init_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock )
{
    static char const * const func = "omp_init_nest_lock";
    kmp_lock_t lck;
    KMP_DEBUG_ASSERT( __kmp_global.init_serial );

    if ( __kmp_global.env_consistency_check ) {
        if ( user_lock == NULL ) {
            KMP_FATAL( LockIsUninitialized, func );
        }
    }

    __kmp_init_nest_lock( &lck );
    *(kmp_lock_t *)user_lock = lck;
} // __kmpc_init_nest_lock

void
__kmpc_destroy_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock )
{
    kmp_lock_t lck = *(kmp_lock_t *)user_lock;
    __kmp_destroy_lock( &lck );
} // __kmpc_destroy_lock

/* destroy the lock */
void
__kmpc_destroy_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock )
{
    kmp_lock_t lck = *(kmp_lock_t *)user_lock;
    __kmp_destroy_lock( &lck );
} // __kmpc_destroy_nest_lock

void
__kmpc_set_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock )
{
    KMP_COUNT_BLOCK(OMP_set_lock);
    kmp_lock_t lck = *(kmp_lock_t *)user_lock;
    __kmp_acquire_lock( &lck, gtid );
}

void
__kmpc_set_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock )
{
    kmp_lock_t lck = *(kmp_lock_t *)user_lock;
    __kmp_acquire_lock( &lck, gtid );
}

void
__kmpc_unset_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
    kmp_lock_t lck = *(kmp_lock_t *)user_lock;
    __kmp_release_lock( &lck, gtid );
}

/* release the lock */
void
__kmpc_unset_nest_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
    kmp_lock_t lck = *(kmp_lock_t *)user_lock;
    __kmp_release_lock( &lck, gtid );
}

/* try to acquire the lock */
int
__kmpc_test_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
    KMP_COUNT_BLOCK(OMP_test_lock);
    int rc;
    kmp_lock_t lck = *(kmp_lock_t *)user_lock;
    rc = __kmp_test_lock( &lck, gtid );
    // Map the internal status onto the Fortran-compatible TRUE/FALSE values.
    return ( rc == GLT_SUCCESS ? FTN_TRUE : FTN_FALSE );
}

/* try to acquire the lock */
int
__kmpc_test_nest_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
    int rc;
    kmp_lock_t lck = *(kmp_lock_t *)user_lock;
    rc = __kmp_test_lock( &lck, gtid );
    return ( rc == GLT_SUCCESS ? FTN_TRUE : FTN_FALSE );
}

/*--------------------------------------------------------------------------------------------------------------------*/

/*
 * Interface to fast scalable reduce methods routines
 */

// keep the selected method in a thread local structure for cross-function usage: will be used in __kmpc_end_reduce* functions;
// another solution: to re-determine the method one more time in __kmpc_end_reduce* functions (new prototype required then)
// AT: which solution is better?
#define __KMP_SET_REDUCTION_METHOD(gtid,rmethod) \
                   ( ( __kmp_global.threads[ ( gtid ) ] -> th.th_local.packed_reduction_method ) = ( rmethod ) )

#define __KMP_GET_REDUCTION_METHOD(gtid) \
                   ( __kmp_global.threads[ ( gtid ) ] -> th.th_local.packed_reduction_method )

// description of the packed_reduction_method variable: look at the macros in kmp.h

/* 2.a.i. Reduce Block without a terminating barrier */
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread number
@param num_vars number of items (variables) to be reduced
@param reduce_size size of data in bytes to be reduced
@param reduce_data pointer to data to be reduced
@param reduce_func callback function providing reduction operation on two operands and returning result of reduction in lhs_data
@param lck pointer to the unique lock data structure
@result 1 for the master thread, 0 for all other team threads, 2 for all team threads if atomic reduction needed

The nowait version is used for a reduce clause with the nowait argument.
*/
kmp_int32
__kmpc_reduce_nowait(
    ident_t *loc, kmp_int32 global_tid,
    kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck )
{
    KMP_COUNT_BLOCK(REDUCE_nowait);
    int retval = 0;
    PACKED_REDUCTION_METHOD_T packed_reduction_method;
#if OMP_40_ENABLED
    kmp_team_t *team;
    kmp_info_t *th;
    int teams_swapped = 0, task_state;
#endif
    KA_TRACE( 10, ( "__kmpc_reduce_nowait() enter: called T#%d\n", global_tid ) );

    // why do we need this initialization here at all?
    // Reduction clause can not be used as a stand-alone directive.

    // do not call __kmp_serial_initialize(), it will be called by __kmp_parallel_initialize() if needed
    // possible detection of false-positive race by the threadchecker ???
    if( ! TCR_4( __kmp_global.init_parallel ) )
        __kmp_parallel_initialize();

    // check correctness of reduce block nesting
    if ( __kmp_global.env_consistency_check )
        __kmp_push_sync( global_tid, ct_reduce, loc, NULL );

#if OMP_40_ENABLED
    th = __kmp_thread_from_gtid(global_tid);
    if( th->th.th_teams_microtask ) {   // AC: check if we are inside the teams construct?
        team = th->th.th_team;
        if( team->t.t_level == th->th.th_teams_level ) {
            // this is reduction at teams construct
            KMP_DEBUG_ASSERT(!th->th.th_info.ds.ds_tid);  // AC: check that tid == 0
            // Let's swap teams temporarily for the reduction barrier
            teams_swapped = 1;
            th->th.th_info.ds.ds_tid = team->t.t_master_tid;
            th->th.th_team = team->t.t_parent;
            th->th.th_team_nproc = th->th.th_team->t.t_nproc;
            th->th.th_task_team = th->th.th_team->t.t_task_team[0];
            task_state = th->th.th_task_state;
            th->th.th_task_state = 0;
        }
    }
#endif // OMP_40_ENABLED

    // packed_reduction_method value will be reused by __kmp_end_reduce* function, the value should be kept in a variable
    // the variable should be either a construct-specific or thread-specific property, not a team specific property
    //     (a thread can reach the next reduce block on the next construct, reduce method may differ on the next construct)
    // an ident_t "loc" parameter could be used as a construct-specific property (what if loc == 0?)
    //     (if both construct-specific and team-specific variables were shared, then unness extra syncs should be needed)
    // a thread-specific variable is better regarding two issues above (next construct and extra syncs)
    // a thread-specific "th_local.reduction_method" variable is used currently
    // each thread executes 'determine' and 'set' lines (no need to execute by one thread, to avoid unness extra syncs)

    packed_reduction_method = __kmp_determine_reduction_method( loc, global_tid, num_vars, reduce_size, reduce_data, reduce_func, lck );
    __KMP_SET_REDUCTION_METHOD( global_tid, packed_reduction_method );

    /* [SM] TODO: supporting tree reduction?
     */

    if( packed_reduction_method == critical_reduce_block ) {

        __kmp_team_enter_critical_section( global_tid, lck );
        retval = 1;

    } else if( packed_reduction_method == empty_reduce_block ) {

        // usage: if team size == 1, no synchronization is required ( Intel platforms only )
        retval = 1;

    } else if( packed_reduction_method == atomic_reduce_block ) {

        retval = 2;

        // all threads should do this pop here (because __kmpc_end_reduce_nowait() won't be called by the code gen)
        //     (it's not quite good, because the checking block has been closed by this 'pop',
        //      but atomic operation has not been executed yet, will be executed slightly later, literally on next instruction)
        if ( __kmp_global.env_consistency_check )
            __kmp_pop_sync( global_tid, ct_reduce, loc );

    } else {

        // should never reach this block
        KMP_ASSERT( 0 ); // "unexpected method"

    }
#if OMP_40_ENABLED
    if( teams_swapped ) {
        // Restore thread structure
        th->th.th_info.ds.ds_tid = 0;
        th->th.th_team = team;
        th->th.th_team_nproc = team->t.t_nproc;
        th->th.th_task_team = team->t.t_task_team[task_state];
        th->th.th_task_state = task_state;
    }
#endif
    KA_TRACE( 10, ( "__kmpc_reduce_nowait() exit: called T#%d: method %08x, returns %08x\n", global_tid, packed_reduction_method, retval ) );

    return retval;
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread id.
@param lck pointer to the unique lock data structure

Finish the execution of a reduce nowait.
*/
void
__kmpc_end_reduce_nowait( ident_t *loc, kmp_int32 global_tid, kmp_critical_name *lck )
{
    PACKED_REDUCTION_METHOD_T packed_reduction_method;

    KA_TRACE( 10, ( "__kmpc_end_reduce_nowait() enter: called T#%d\n", global_tid ) );

    // Retrieve the method that __kmpc_reduce_nowait stored on this thread.
    packed_reduction_method = __KMP_GET_REDUCTION_METHOD( global_tid );

    if( packed_reduction_method == critical_reduce_block ) {

        __kmp_team_end_critical_section( global_tid, lck );

    } else if( packed_reduction_method == empty_reduce_block ) {

        // usage: if team size == 1, no synchronization is required ( on Intel platforms only )

    } else if( packed_reduction_method == atomic_reduce_block ) {

        // neither master nor other workers should get here
        //     (code gen does not generate this call in case 2: atomic reduce block)
        // actually it's better to remove this elseif at all;
        // after removal this value will checked by the 'else' and will assert

    } else {

        // should never reach this block
        KMP_ASSERT( 0 ); // "unexpected method"

    }

    if ( __kmp_global.env_consistency_check )
        __kmp_pop_sync( global_tid, ct_reduce, loc );

    KA_TRACE( 10, ( "__kmpc_end_reduce_nowait() exit: called T#%d: method %08x\n", global_tid, packed_reduction_method ) );

    return;
}

/* 2.a.ii. Reduce Block with a terminating barrier */

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread number
@param num_vars number of items (variables) to be reduced
@param reduce_size size of data in bytes to be reduced
@param reduce_data pointer to data to be reduced
@param reduce_func callback function providing reduction operation on two operands and returning result of reduction in lhs_data
@param lck pointer to the unique lock data structure
@result 1 for the master thread, 0 for all other team threads, 2 for all team threads if atomic reduction needed

A blocking reduce that includes an implicit barrier.
*/
kmp_int32
__kmpc_reduce(
    ident_t *loc, kmp_int32 global_tid,
    kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
    void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck )
{
    KMP_COUNT_BLOCK(REDUCE_wait);
    int retval = 0;
    PACKED_REDUCTION_METHOD_T packed_reduction_method;

    KA_TRACE( 10, ( "__kmpc_reduce() enter: called T#%d\n", global_tid ) );

    // why do we need this initialization here at all?
    // Reduction clause can not be a stand-alone directive.

    // do not call __kmp_serial_initialize(), it will be called by __kmp_parallel_initialize() if needed
    // possible detection of false-positive race by the threadchecker ???
    if( ! TCR_4( __kmp_global.init_parallel ) )
        __kmp_parallel_initialize();

    // check correctness of reduce block nesting
    if ( __kmp_global.env_consistency_check )
        __kmp_push_sync( global_tid, ct_reduce, loc, NULL );

    // Pick the reduction method and remember it per-thread for __kmpc_end_reduce.
    packed_reduction_method = __kmp_determine_reduction_method( loc, global_tid, num_vars, reduce_size, reduce_data, reduce_func, lck );
    __KMP_SET_REDUCTION_METHOD( global_tid, packed_reduction_method );

    /* [SM] TODO: supporting tree reduction?
     */

    if( packed_reduction_method == critical_reduce_block ) {

        __kmp_team_enter_critical_section( global_tid, lck );
        retval = 1;

    } else if( packed_reduction_method == empty_reduce_block ) {

        // usage: if team size == 1, no synchronization is required ( Intel platforms only )
        retval = 1;

    } else if( packed_reduction_method == atomic_reduce_block ) {

        retval = 2;

    } else {

        // should never reach this block
        KMP_ASSERT( 0 ); // "unexpected method"

    }

    KA_TRACE( 10, ( "__kmpc_reduce() exit: called T#%d: method %08x, returns %08x\n", global_tid, packed_reduction_method, retval ) );

    return retval;
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread id.
@param lck pointer to the unique lock data structure

Finish the execution of a blocking reduce.
The <tt>lck</tt> pointer must be the same as that used in the corresponding start function.
*/
void
__kmpc_end_reduce( ident_t *loc, kmp_int32 global_tid, kmp_critical_name *lck )
{
    PACKED_REDUCTION_METHOD_T packed_reduction_method;

    KA_TRACE( 10, ( "__kmpc_end_reduce() enter: called T#%d\n", global_tid ) );

    packed_reduction_method = __KMP_GET_REDUCTION_METHOD( global_tid );

    // this barrier should be visible to a customer and to the threading profile tool
    //     (it's a terminating barrier on constructs if NOWAIT not specified)

    if( packed_reduction_method == critical_reduce_block ) {

        __kmp_team_end_critical_section( global_tid, lck );

        // TODO: implicit barrier: should be exposed
        __kmp_barrier( global_tid );

    } else if( packed_reduction_method == empty_reduce_block ) {

        // usage: if team size == 1, no synchronization is required ( Intel platforms only )

        // TODO: implicit barrier: should be exposed
        __kmp_barrier( global_tid );

    } else if( packed_reduction_method == atomic_reduce_block ) {

        // TODO: implicit barrier: should be exposed
        __kmp_barrier( global_tid );

    } else {

        // should never reach this block
        KMP_ASSERT( 0 ); // "unexpected method"

    }

    if ( __kmp_global.env_consistency_check )
        __kmp_pop_sync( global_tid, ct_reduce, loc );

    KA_TRACE( 10, ( "__kmpc_end_reduce() exit: called T#%d: method %08x\n", global_tid, packed_reduction_method ) );

    return;
}

#undef __KMP_GET_REDUCTION_METHOD
#undef __KMP_SET_REDUCTION_METHOD

/*-- end of interface to fast scalable reduce routines ---------------------------------------------------------------*/

// Returns the id of the current task, or 0 when called outside the runtime.
kmp_uint64
__kmpc_get_taskid() {

    kmp_int32    gtid;
    kmp_info_t * thread;

    gtid = __kmp_get_gtid();
    if ( gtid < 0 ) {
        return 0;
    }; // if
    thread = __kmp_thread_from_gtid( gtid );
    return thread->th.th_current_task->td_task_id;

} // __kmpc_get_taskid

// Returns the id of the parent of the current task, 0 when there is none
// or when called outside the runtime.
kmp_uint64
__kmpc_get_parent_taskid() {

    kmp_int32        gtid;
    kmp_info_t *     thread;
    kmp_taskdata_t * parent_task;

    gtid = __kmp_get_gtid();
    if ( gtid < 0 ) {
        return 0;
    }; // if
    thread      = __kmp_thread_from_gtid( gtid );
    parent_task = thread->th.th_current_task->td_parent;
    return ( parent_task == NULL ? 0 : parent_task->td_task_id );

} // __kmpc_get_parent_taskid

// Records the KMP_PLACE_THREADS topology request (sockets/cores/threads and
// offsets) in the global state, initializing the library first if needed.
void __kmpc_place_threads(int nS, int sO, int nC, int cO, int nT)
{
    if ( ! __kmp_global.init_serial ) {
        __kmp_serial_initialize();
    }
    __kmp_global.place_num_sockets = nS;
    __kmp_global.place_socket_offset = sO;
    __kmp_global.place_num_cores = nC;
    __kmp_global.place_core_offset = cO;
    __kmp_global.place_num_threads_per_core = nT;
}

#if OMP_41_ENABLED
/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param gtid  global thread number.
@param num_dims  number of associated doacross loops.
@param dims  info on loops bounds.

Initialize doacross loop information.
Expect compiler send us inclusive bounds,
e.g. for(i=2;i<9;i+=2) lo=2, up=8, st=2.
*/
void
__kmpc_doacross_init(ident_t *loc, int gtid, int num_dims, struct kmp_dim * dims)
{
    int j, idx;
    kmp_int64 last, trace_count;
    kmp_info_t *th = __kmp_global.threads[gtid];
    kmp_team_t *team = th->th.th_team;
    kmp_uint32 *flags;
    kmp_disp_t *pr_buf = th->th.th_dispatch;
    dispatch_shared_info_t *sh_buf;

    KA_TRACE(20,("__kmpc_doacross_init() enter: called T#%d, num dims %d, active %d\n",
                 gtid, num_dims, !team->t.t_serialized));
    KMP_DEBUG_ASSERT(dims != NULL);
    KMP_DEBUG_ASSERT(num_dims > 0);

    if( team->t.t_serialized ) {
        KA_TRACE(20,("__kmpc_doacross_init() exit: serialized team\n"));
        return; // no dependencies if team is serialized
    }
    KMP_DEBUG_ASSERT(team->t.t_nproc > 1);
    idx = pr_buf->th_doacross_buf_idx++; // Increment index of shared buffer for the next loop
    sh_buf = &team->t.t_disp_buffer[idx % KMP_MAX_DISP_BUF];

    // Save bounds info into allocated private buffer
    KMP_DEBUG_ASSERT(pr_buf->th_doacross_info == NULL);
    pr_buf->th_doacross_info =
        (kmp_int64*)__kmp_thread_malloc(th, sizeof(kmp_int64)*(4 * num_dims + 1));
    KMP_DEBUG_ASSERT(pr_buf->th_doacross_info != NULL);
    pr_buf->th_doacross_info[0] = (kmp_int64)num_dims; // first element is number of dimensions
    // Save also address of num_done in order to access it later without knowing the buffer index
pr_buf->th_doacross_info[1] = (kmp_int64)&sh_buf->doacross_num_done;
    pr_buf->th_doacross_info[2] = dims[0].lo;
    pr_buf->th_doacross_info[3] = dims[0].up;
    pr_buf->th_doacross_info[4] = dims[0].st;
    last = 5;
    for( j = 1; j < num_dims; ++j ) {
        kmp_int64 range_length; // To keep ranges of all dimensions but the first dims[0]
        if( dims[j].st == 1 ) { // most common case
            // AC: should we care of ranges bigger than LLONG_MAX? (not for now)
            range_length = dims[j].up - dims[j].lo + 1;
        } else {
            if( dims[j].st > 0 ) {
                KMP_DEBUG_ASSERT(dims[j].up > dims[j].lo);
                range_length = (kmp_uint64)(dims[j].up - dims[j].lo) / dims[j].st + 1;
            } else {            // negative increment
                KMP_DEBUG_ASSERT(dims[j].lo > dims[j].up);
                range_length = (kmp_uint64)(dims[j].lo - dims[j].up) / (-dims[j].st) + 1;
            }
        }
        pr_buf->th_doacross_info[last++] = range_length;
        pr_buf->th_doacross_info[last++] = dims[j].lo;
        pr_buf->th_doacross_info[last++] = dims[j].up;
        pr_buf->th_doacross_info[last++] = dims[j].st;
    }

    // Compute total trip count.
    // Start with range of dims[0] which we don't need to keep in the buffer.
    if( dims[0].st == 1 ) { // most common case
        trace_count = dims[0].up - dims[0].lo + 1;
    } else if( dims[0].st > 0 ) {
        KMP_DEBUG_ASSERT(dims[0].up > dims[0].lo);
        trace_count = (kmp_uint64)(dims[0].up - dims[0].lo) / dims[0].st + 1;
    } else {   // negative increment
        KMP_DEBUG_ASSERT(dims[0].lo > dims[0].up);
        trace_count = (kmp_uint64)(dims[0].lo - dims[0].up) / (-dims[0].st) + 1;
    }
    for( j = 1; j < num_dims; ++j ) {
        trace_count *= pr_buf->th_doacross_info[4 * j + 1]; // use kept ranges
    }
    KMP_DEBUG_ASSERT(trace_count > 0);

    // Check if shared buffer is not occupied by other loop (idx - KMP_MAX_DISP_BUF)
    if( idx != sh_buf->doacross_buf_idx ) {
        // Shared buffer is occupied, wait for it to be free
        __kmp_wait_yield_4( (kmp_uint32*)&sh_buf->doacross_buf_idx, idx, __kmp_eq_4, NULL );
    }
    // Check if we are the first thread. After the CAS the first thread gets 0,
    // others get 1 if initialization is in progress, allocated pointer otherwise.
    flags = (kmp_uint32*)KMP_COMPARE_AND_STORE_RET64(
        (kmp_int64*)&sh_buf->doacross_flags,NULL,(kmp_int64)1);
    if( flags == NULL ) {
        // we are the first thread, allocate the array of flags
        kmp_int64 size = trace_count / 8 + 8; // in bytes, use single bit per iteration
        sh_buf->doacross_flags = (kmp_uint32*)__kmp_thread_calloc(th, size, 1);
    } else if( (kmp_int64)flags == 1 ) {
        // initialization is still in progress, need to wait
        while( (volatile kmp_int64)sh_buf->doacross_flags == 1 ) {
            KMP_YIELD(TRUE);
        }
    }
    KMP_DEBUG_ASSERT((kmp_int64)sh_buf->doacross_flags > 1); // check value of pointer
    pr_buf->th_doacross_flags = sh_buf->doacross_flags;      // save private copy in order to not
                                                             // touch shared buffer on each iteration
    KA_TRACE(20,("__kmpc_doacross_init() exit: T#%d\n", gtid));
}

// Block until the iteration identified by vec (one index per doacross
// dimension) has been posted by __kmpc_doacross_post.
void
__kmpc_doacross_wait(ident_t *loc, int gtid, long long *vec)
{
    kmp_int32 shft, num_dims, i;
    kmp_uint32 flag;
    kmp_int64 iter_number; // iteration number of "collapsed" loop nest
    kmp_info_t *th = __kmp_global.threads[gtid];
    kmp_team_t *team = th->th.th_team;
    kmp_disp_t *pr_buf;
    kmp_int64 lo, up, st;

    KA_TRACE(20,("__kmpc_doacross_wait() enter: called T#%d\n", gtid));
    if( team->t.t_serialized ) {
        KA_TRACE(20,("__kmpc_doacross_wait() exit: serialized team\n"));
        return; // no dependencies if team is serialized
    }

    // calculate sequential iteration number and check out-of-bounds condition
    pr_buf = th->th.th_dispatch;
    KMP_DEBUG_ASSERT(pr_buf->th_doacross_info != NULL);
    num_dims = pr_buf->th_doacross_info[0];
    lo = pr_buf->th_doacross_info[2];
    up = pr_buf->th_doacross_info[3];
    st = pr_buf->th_doacross_info[4];
    if( st == 1 ) { // most common case
        if( vec[0] < lo || vec[0] > up ) {
            KA_TRACE(20,(
                "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                gtid, vec[0], lo, up));
            return;
        }
        iter_number = vec[0] - lo;
    } else if( st > 0 ) {
        if( vec[0] < lo || vec[0] > up ) {
KA_TRACE(20,(
                "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                gtid, vec[0], lo, up));
            return;
        }
        iter_number = (kmp_uint64)(vec[0] - lo) / st;
    } else {        // negative increment
        if( vec[0] > lo || vec[0] < up ) {
            KA_TRACE(20,(
                "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                gtid, vec[0], lo, up));
            return;
        }
        iter_number = (kmp_uint64)(lo - vec[0]) / (-st);
    }
    // Collapse the remaining dimensions into a single linear iteration number.
    for( i = 1; i < num_dims; ++i ) {
        kmp_int64 iter, ln;
        kmp_int32 j = i * 4;
        ln = pr_buf->th_doacross_info[j + 1];
        lo = pr_buf->th_doacross_info[j + 2];
        up = pr_buf->th_doacross_info[j + 3];
        st = pr_buf->th_doacross_info[j + 4];
        if( st == 1 ) {
            if( vec[i] < lo || vec[i] > up ) {
                KA_TRACE(20,(
                    "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                    gtid, vec[i], lo, up));
                return;
            }
            iter = vec[i] - lo;
        } else if( st > 0 ) {
            if( vec[i] < lo || vec[i] > up ) {
                KA_TRACE(20,(
                    "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                    gtid, vec[i], lo, up));
                return;
            }
            iter = (kmp_uint64)(vec[i] - lo) / st;
        } else {   // st < 0
            if( vec[i] > lo || vec[i] < up ) {
                KA_TRACE(20,(
                    "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                    gtid, vec[i], lo, up));
                return;
            }
            iter = (kmp_uint64)(lo - vec[i]) / (-st);
        }
        iter_number = iter + ln * iter_number;
    }
    // Spin until the corresponding bit in the shared flag array is set.
    shft = iter_number % 32; // use 32-bit granularity
    iter_number >>= 5;       // divided by 32
    flag = 1 << shft;
    while( (flag & pr_buf->th_doacross_flags[iter_number]) == 0 ) {
        KMP_YIELD(TRUE);
    }
    KA_TRACE(20,("__kmpc_doacross_wait() exit: T#%d wait for iter %lld completed\n",
                 gtid, (iter_number<<5)+shft));
}

// Mark the iteration identified by vec as completed so dependent
// __kmpc_doacross_wait calls can proceed.
void
__kmpc_doacross_post(ident_t *loc, int gtid, long long *vec)
{
    kmp_int32 shft, num_dims, i;
    kmp_uint32 flag;
    kmp_int64 iter_number; // iteration number of "collapsed" loop nest
    kmp_info_t *th = __kmp_global.threads[gtid];
    kmp_team_t *team = th->th.th_team;
    kmp_disp_t *pr_buf;
    kmp_int64 lo, st;

    KA_TRACE(20,("__kmpc_doacross_post() enter: called T#%d\n", gtid));
    if( team->t.t_serialized ) {
        KA_TRACE(20,("__kmpc_doacross_post() exit: serialized team\n"));
        return; // no dependencies if team is serialized
    }

    // calculate sequential iteration number (same as in "wait" but no out-of-bounds checks)
    pr_buf = th->th.th_dispatch;
    KMP_DEBUG_ASSERT(pr_buf->th_doacross_info != NULL);
    num_dims = pr_buf->th_doacross_info[0];
    lo = pr_buf->th_doacross_info[2];
    st = pr_buf->th_doacross_info[4];
    if( st == 1 ) { // most common case
        iter_number = vec[0] - lo;
    } else if( st > 0 ) {
        iter_number = (kmp_uint64)(vec[0] - lo) / st;
    } else {        // negative increment
        iter_number = (kmp_uint64)(lo - vec[0]) / (-st);
    }
    for( i = 1; i < num_dims; ++i ) {
        kmp_int64 iter, ln;
        kmp_int32 j = i * 4;
        ln = pr_buf->th_doacross_info[j + 1];
        lo = pr_buf->th_doacross_info[j + 2];
        st = pr_buf->th_doacross_info[j + 4];
        if( st == 1 ) {
            iter = vec[i] - lo;
        } else if( st > 0 ) {
            iter = (kmp_uint64)(vec[i] - lo) / st;
        } else {   // st < 0
            iter = (kmp_uint64)(lo - vec[i]) / (-st);
        }
        iter_number = iter + ln * iter_number;
    }
    shft = iter_number % 32; // use 32-bit granularity
    iter_number >>= 5;       // divided by 32
    flag = 1 << shft;
    // Atomically OR the bit in; skip the RMW when it is already set.
    if( (flag & pr_buf->th_doacross_flags[iter_number]) == 0 )
        KMP_TEST_THEN_OR32( (kmp_int32*)&pr_buf->th_doacross_flags[iter_number], (kmp_int32)flag );
    KA_TRACE(20,("__kmpc_doacross_post() exit: T#%d iter %lld posted\n",
                 gtid, (iter_number<<5)+shft));
}

// Per-thread teardown of a doacross loop nest; the last thread to finish
// also releases the team-shared flag array.
void
__kmpc_doacross_fini(ident_t *loc, int gtid)
{
    kmp_int64 num_done;
    kmp_info_t *th = __kmp_global.threads[gtid];
    kmp_team_t *team = th->th.th_team;
    kmp_disp_t *pr_buf = th->th.th_dispatch;

    KA_TRACE(20,("__kmpc_doacross_fini() enter: called T#%d\n", gtid));
    if( team->t.t_serialized ) {
        KA_TRACE(20,("__kmpc_doacross_fini() exit: serialized team %p\n", team));
        return; // nothing to do
    }
    num_done = KMP_TEST_THEN_INC64((kmp_int64*)pr_buf->th_doacross_info[1]) + 1;
    if( num_done == th->th.th_team_nproc ) {
        // we are the last thread, need to free shared resources
        int idx = pr_buf->th_doacross_buf_idx - 1;
        dispatch_shared_info_t *sh_buf = &team->t.t_disp_buffer[idx % KMP_MAX_DISP_BUF];
        KMP_DEBUG_ASSERT(pr_buf->th_doacross_info[1] == (kmp_int64)&sh_buf->doacross_num_done);
        KMP_DEBUG_ASSERT(num_done == (kmp_int64)sh_buf->doacross_num_done);
        KMP_DEBUG_ASSERT(idx == sh_buf->doacross_buf_idx);
        __kmp_thread_free(th, (void*)sh_buf->doacross_flags);
        sh_buf->doacross_flags = NULL;
        sh_buf->doacross_num_done = 0;
        sh_buf->doacross_buf_idx += KMP_MAX_DISP_BUF; // free buffer for future re-use
    }
    // free private resources (need to keep buffer index forever)
    __kmp_thread_free(th, (void*)pr_buf->th_doacross_info);
    pr_buf->th_doacross_info = NULL;
    KA_TRACE(20,("__kmpc_doacross_fini() exit: T#%d\n", gtid));
}

#endif

// end of file //
b_numbers.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/* Print an n x n integer grid to stdout, one row per line, followed by a
 * blank line. */
void print_array(int **a, const int length)
{
    for (int y = 0; y < length; y++) {
        for (int x = 0; x < length; x++) {
            printf("%d ", a[y][x]);
        }
        printf("\n");
    }
    printf("\n");
}

/*
 * Simulates R rounds of random "impacts" on an n x n grid in parallel and
 * reports the maximum resulting cell value.
 *
 * argv[1] = n (grid edge length), argv[2] = R (number of rounds),
 * argv[3] = optional RNG seed (default 123).
 *
 * Each impact (z, i, j) adds a pyramid of height z centered at row i,
 * column j, clamped to the grid. All random impact data is generated up
 * front (sequentially) so the parallel phase is reproducible regardless of
 * thread count.
 *
 * Returns 0 on success, -1 on missing arguments.
 */
int main(const int argc, const char *argv[])
{
    if (argc < 3) {
        printf("Additional arguments required\n");
        return -1;
    }

    const int n = atoi(argv[1]);
    const int R = atoi(argv[2]);

    if (argc >= 4) {
        srand(atoi(argv[3]));
    } else {
        srand(123);
    }

    const double start_time = omp_get_wtime();
    const int n_threads = omp_get_max_threads();

    int **a = malloc(sizeof(int *) * n);
    int *round_info = malloc(sizeof(int) * R);
    int ***round_impacts = malloc(sizeof(int **) * R);

    /* Pre-generate every round's impacts: k impacts per round, each stored
     * as a (z, i, j) triple -- strength z, center row i, center column j. */
    for (int r = 0; r < R; r++) {
        const int k = rand() % 10;
        round_info[r] = k;
        round_impacts[r] = malloc(sizeof(int *) * k);
        for (int w = 0; w < k; ++w) {
            const int z = rand() % (n / 4); // int division
            const int i = rand() % n;       // y coordinate
            const int j = rand() % n;       // x coordinate
            round_impacts[r][w] = malloc(sizeof(int) * 3);
            round_impacts[r][w][0] = z;
            round_impacts[r][w][1] = i;
            round_impacts[r][w][2] = j;
        }
    }

    int max = 0;
    /* n, R and n_threads are const-qualified and therefore predetermined
     * shared under default(none). */
    #pragma omp parallel default(none) shared(a) firstprivate(round_info, round_impacts) reduction(max:max)
    {
        /* Allocate zero-initialized rows; schedule(static,1) fixes which
         * thread first-touches each row. */
        #pragma omp for schedule(static, 1) nowait
        for (int y = 0; y < n; y++) {
            a[y] = calloc(n, sizeof(int));
        }

        for (int r = 0; r < R; ++r) {
            if (n <= 16) {
                #pragma omp master
                {
                    printf("Eingeschlagen:");
                }
            }

            const int k = round_info[r];
            for (int w = 0; w < k; ++w) {
                const int z = round_impacts[r][w][0];
                const int i = round_impacts[r][w][1];
                const int j = round_impacts[r][w][2];
                if (n <= 16) {
                    #pragma omp master
                    {
                        printf(" %d(%d, %d)", z, i, j);
                    }
                }

                /* Bounding box of the impact, clamped to the grid. */
                const int reach = z - 1;
                const int lower_x_bound = (j - reach > 0) ? j - reach : 0;
                const int upper_x_bound = (reach + j < n) ? reach + j : n - 1;
                const int lower_y_bound = (i - reach > 0) ? i - reach : 0;
                const int upper_y_bound = (reach + i < n) ? reach + i : n - 1;

                /* Round the loop start down to a multiple of n_threads so
                 * schedule(static,1) keeps each thread on the same rows it
                 * allocated; rows below lower_y_bound are skipped inside. */
                const int alignment_offset = lower_y_bound % n_threads;
                const int alignment = lower_y_bound - alignment_offset;

                #pragma omp for schedule(static, 1) nowait
                for (int y = alignment; y <= upper_y_bound; y++) {
                    if (y < lower_y_bound) {
                        continue;
                    }
                    /* For row y, cells within |x-j| <= y_distance get the
                     * plateau value z - y_distance; cells outside slope off
                     * linearly toward the bounding box edges. */
                    const int y_distance = abs(y - i);
                    const int constant_start = (j - y_distance < lower_x_bound) ? lower_x_bound : j - y_distance;
                    const int constant_end = (j + y_distance > upper_x_bound) ? upper_x_bound : j + y_distance;
                    const int constant_value = z - y_distance;
                    for (int x = lower_x_bound; x < constant_start; x++) {
                        a[y][x] += z - j + x;
                    }
                    for (int x = constant_start; x <= constant_end; x++) {
                        a[y][x] += constant_value;
                    }
                    for (int x = constant_end + 1; x <= upper_x_bound; x++) {
                        a[y][x] += z + j - x;
                    }
                }
            }

            if (n <= 16) {
                #pragma omp barrier
                #pragma omp master
                {
                    printf("\n");
                    print_array(a, n);
                }
            }
            /* All impacts of round r must land before round r+1 starts. */
            #pragma omp barrier
        }

        /* Per-thread maxima are combined by the max reduction. */
        #pragma omp for schedule(static, 1)
        for (int y = 0; y < n; y++) {
            for (int x = 0; x < n; x++) {
                if (a[y][x] > max) {
                    max = a[y][x];
                }
            }
        }
    }

    /* BUG FIX: end_time was declared `const int`, truncating the double
     * returned by omp_get_wtime() and corrupting the reported elapsed time. */
    const double end_time = omp_get_wtime();

    printf("time = %fs\n", end_time - start_time);
    printf("max = %d\n", max);

    if (n > 80) {
        printf("a[80][15] = %d\n", a[80][15]);
    }

    /* Release all heap allocations (previously leaked). */
    for (int r = 0; r < R; r++) {
        for (int w = 0; w < round_info[r]; ++w) {
            free(round_impacts[r][w]);
        }
        free(round_impacts[r]);
    }
    free(round_impacts);
    free(round_info);
    for (int y = 0; y < n; y++) {
        free(a[y]);
    }
    free(a);

    return 0;
}
direct_nbody.h
void direct_nbody( const void *krnl_param, kernel_eval_fptr krnl_eval, const int pt_dim, const int krnl_dim, const DTYPE *src_coord, const int src_coord_ld, const int n_src_pt, const DTYPE *src_val, const DTYPE *dst_coord, const int dst_coord_ld, const int n_dst_pt, DTYPE *dst_val ) { const int npt_blk = 256; const int blk_size = npt_blk * krnl_dim; const int n_thread = omp_get_max_threads(); memset(dst_val, 0, sizeof(DTYPE) * n_dst_pt * krnl_dim); DTYPE *krnl_mat_buffs = (DTYPE*) malloc(sizeof(DTYPE) * n_thread * blk_size * blk_size); assert(krnl_mat_buffs != NULL); #pragma omp parallel { int tid = omp_get_thread_num(); DTYPE *krnl_mat_buff = krnl_mat_buffs + tid * blk_size * blk_size; int tid_dst_pt_s, tid_dst_pt_n, tid_dst_pt_e; calc_block_spos_len(n_dst_pt, n_thread, tid, &tid_dst_pt_s, &tid_dst_pt_n); tid_dst_pt_e = tid_dst_pt_s + tid_dst_pt_n; for (int dst_pt_idx = tid_dst_pt_s; dst_pt_idx < tid_dst_pt_e; dst_pt_idx += npt_blk) { int dst_pt_blk = (dst_pt_idx + npt_blk > tid_dst_pt_e) ? (tid_dst_pt_e - dst_pt_idx) : npt_blk; int krnl_mat_nrow = dst_pt_blk * krnl_dim; const DTYPE *dst_coord_ptr = dst_coord + dst_pt_idx; DTYPE *dst_val_ptr = dst_val + dst_pt_idx * krnl_dim; for (int src_pt_idx = 0; src_pt_idx < n_src_pt; src_pt_idx += npt_blk) { int src_pt_blk = (src_pt_idx + npt_blk > n_src_pt) ? (n_src_pt - src_pt_idx) : npt_blk; int krnl_mat_ncol = src_pt_blk * krnl_dim; const DTYPE *src_coord_ptr = src_coord + src_pt_idx; const DTYPE *src_val_ptr = src_val + src_pt_idx * krnl_dim; krnl_eval( dst_coord_ptr, dst_coord_ld, dst_pt_blk, src_coord_ptr, src_coord_ld, src_pt_blk, krnl_param, krnl_mat_buff, krnl_mat_ncol ); CBLAS_GEMV( CblasRowMajor, CblasNoTrans, krnl_mat_nrow, krnl_mat_ncol, 1.0, krnl_mat_buff, krnl_mat_ncol, src_val_ptr, 1, 1.0, dst_val_ptr, 1 ); } } } //printf("Calculate direct n-body reference results for %d points done\n", n_dst_pt); free(krnl_mat_buffs); }
ten_tusscher_2004_epi_S3_9.c
//Original Ten Tusscher
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S3_9.h"

// Fill in the generic cell-model descriptor: initial membrane voltage and
// the number of ODE state variables (NEQ; presumably 17 to match sv_sst
// below -- defined in the header).
GET_CELL_MODEL_DATA(init_cell_model_data) {

    assert(cell_model);

    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;

    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

//TODO: this should be called only once for the whole mesh, like in the GPU code
// Load a precomputed steady-state into sv. Index order matches the layout
// read by RHS_cpu: V, m, h, j, Xr1, Xr2, Xs, s, r, d, f, FCa, g, Cai, CaSR,
// Nai, Ki.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {

    // Default initial conditions
    /*
    sv[0] = INITIAL_V;  // V;      millivolt
    sv[1] = 0.f;        // M
    sv[2] = 0.75;       // H
    sv[3] = 0.75f;      // J
    sv[4] = 0.f;        // Xr1
    sv[5] = 1.f;        // Xr2
    sv[6] = 0.f;        // Xs
    sv[7] = 1.f;        // S
    sv[8] = 0.f;        // R
    sv[9] = 0.f;        // D
    sv[10] = 1.f;       // F
    sv[11] = 1.f;       // FCa
    sv[12] = 1.f;       // G
    sv[13] = 0.0002;    // Cai
    sv[14] = 0.2f;      // CaSR
    sv[15] = 11.6f;     // Nai
    sv[16] = 138.3f;    // Ki
    */

    // Elnaz's steady-state initial conditions
    real sv_sst[]={-86.5013992162453,0.00130861673751888,0.778065207799721,0.777945271471961,0.000176364646931449,0.484394446924523,0.00295432941296383,0.999998325858239,1.95514472517084e-08,1.90799805126353e-05,0.999773537725853,1.00682358047437,0.999989828965692,5.15136550864487e-05,0.464168596431909,10.1029140201320,139.520992982978};
    for (uint32_t i = 0; i < NEQ; i++)
        sv[i] = sv_sst[i];
}

// Advance every requested cell by num_steps explicit steps of size dt.
// cells_to_solve == NULL means "solve cells 0..num_cells_to_solve-1".
// NOTE(review): stim_currents is indexed by the loop index i, not by sv_id;
// callers must supply it in the same order as cells_to_solve.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {

    uint32_t sv_id;
    int i;

    // Each iteration touches a disjoint NEQ-slice of sv, so cells can be
    // solved in parallel.
    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {

        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;

        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// One time step for a single cell: copy the state, evaluate the model, and
// write back. NOTE: RHS_cpu returns the UPDATED state in rDY (not time
// derivatives), so the plain copy below completes the integration step.
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {

    assert(sv);

    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// Advance the ten Tusscher 2004 (epicardial variant) model by one step dt.
// sv holds the 17 state variables (see layout above); rDY_ receives the new
// state: gates use an exponential (Rush-Larsen-style) update, concentrations
// and the membrane voltage use explicit Euler updates computed inline.
// stim_current is the external stimulus applied this step.
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {

    // State variables
    real svolt = sv[0];
    real sm = sv[1];
    real sh = sv[2];
    real sj = sv[3];
    real sxr1 = sv[4];
    real sxr2 = sv[5];
    real sxs = sv[6];
    real ss = sv[7];
    real sr = sv[8];
    real sd = sv[9];
    real sf = sv[10];
    real sfca = sv[11];
    real sg = sv[12];
    real Cai = sv[13];
    real CaSR = sv[14];
    real Nai = sv[15];
    real Ki = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    ///#ifdef EPI
    real Gks=0.245;
    ///#endif
    ///#ifdef ENDO
    ///    real Gks=0.245;
    ///#endif
    ///#ifdef MCELL
    ///    real Gks=0.062;
    ///#endif
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    //#ifdef EPI
    real Gto=0.294;
    //#endif
    //#ifdef ENDO
    //    real Gto=0.073;
    //#endif
    //#ifdef MCELL
    //    real Gto=0.294;
    ///#endif
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Fitted per-instance conductances/rates; these override the textbook
    // defaults assigned above (the S3_9 parameter set for this cell).
    real parameters []={14.2867983319107,0.000339243817256355,0.000123558187223553,0.000477619539977574,0.254366350512071,0.130539192408931,0.165983272524388,4.80691153863705,0.0127900408910393,1.86320096628611,1096.44289789592,0.000509279381358139,0.412387297977439,0.0106781826436847,0.00358184130171284,6.37858677842872e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
    //    real BufferFactorc;
    //    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    //    real BufcKbufc=Bufc*Kbufc;
    //    real Kbufcsquare=Kbufc*Kbufc;
    //    real Kbufc2=2*Kbufc;
    //    real BufsrKbufsr=Bufsr*Kbufsr;
    //    const real Kbufsrsquare=Kbufsr*Kbufsr;
    //    const real Kbufsr2=2*Kbufsr;
    // Precomputed exponential integrator factors for the FCa and G gates.
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents
    // Nernst/reversal potentials
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    ///Ileak=0.00008f*(CaSR-Cai);
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    // SR calcium: analytic update of buffered concentration (quadratic in CaSR)
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    // Cytosolic calcium: same quadratic buffering scheme
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;

    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;

    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // Transient-outward gate kinetics differ per cell type (compile-time).
#ifdef EPI
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));

    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates
    // rDY stores the NEW gate value: x(t+dt) = x_inf - (x_inf - x) e^(-dt/tau)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // FCa and G gates may only decrease while depolarized (> -37 mV)
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;

}
tilecorr.h
void tilecorr() { int c0,c1,c2,c3,c5,c6,c7,c9,c11,c10,c4,c12; for( c1 = 1; c1 < N + floord(N - 2, 128); c1 += 1) #pragma omp parallel for schedule(dynamic, 1) for( c3 = max(0, -N + c1 + 1); c3 <= (c1 - 1) / 129; c3 += 1) for( c4 = 0; c4 <= 1; c4 += 1) { if (c4 == 1) { for( c9 = N - c1 + 129 * c3; c9 <= min(N - 1, N - c1 + 129 * c3 + 127); c9 += 1) for( c10 = max(0, N - c1 + 129 * c3 - c9 + 1); c10 <= 1; c10 += 1) { if (c10 == 1) { S[(N-c1+c3-1)][c9] = MAX(S[(N-c1+c3-1)][c9], S[(N-c1+c3-1)+1][c9-1] + can_pair(RNA, (N-c1+c3-1), c9)); } else for( c11 = 128 * c3 + 1; c11 <= -N + c1 - c3 + c9; c11 += 1) S[(N-c1+c3-1)][c9] = MAX(S[(N-c1+c3-1)][c11+(N-c1+c3-1)] + S[c11+(N-c1+c3-1)+1][c9], S[(N-c1+c3-1)][c9]); } } else for( c5 = 0; c5 <= 8 * c3; c5 += 1) for( c9 = N - c1 + 129 * c3; c9 <= min(N - 1, N - c1 + 129 * c3 + 127); c9 += 1) for( c11 = 16 * c5; c11 <= min(128 * c3, 16 * c5 + 15); c11 += 1) S[(N-c1+c3-1)][c9] = MAX(S[(N-c1+c3-1)][c11+(N-c1+c3-1)] + S[c11+(N-c1+c3-1)+1][c9], S[(N-c1+c3-1)][c9]); } }
GB_subassign_05.c
//------------------------------------------------------------------------------
// GB_subassign_05: C(I,J)<M> = scalar ; no S
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// Method 05: C(I,J)<M> = scalar ; no S

// M:           present
// Mask_comp:   false
// C_replace:   false
// accum:       NULL
// A:           scalar
// S:           none

#include "GB_subassign_methods.h"

GrB_Info GB_subassign_05
(
    GrB_Matrix C,
    // input:
    const GrB_Index *I,
    const int64_t nI,
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    const GrB_Matrix M,
    const bool Mask_struct,
    const void *scalar,
    const GrB_Type atype,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GB_GET_C ;
    // snapshot of the zombie count on entry (refreshed again before phase 2)
    int64_t zorig = C->nzombies ;
    const bool C_is_hyper = C->is_hyper ;
    const int64_t *GB_RESTRICT Ch = C->h ;
    const int64_t *GB_RESTRICT Cp = C->p ;
    const int64_t Cnvec = C->nvec ;
    const int64_t cvlen = C->vlen ;
    GB_GET_MASK ;
    GB_GET_SCALAR ;
    GrB_BinaryOp accum = NULL ;

    //--------------------------------------------------------------------------
    // Method 05: C(I,J)<M> = scalar ; no S
    //--------------------------------------------------------------------------

    // Time: Close to Optimal: the method must iterate over all entries in M,
    // so the time is Omega(nnz(M)).  For each entry M(i,j)=1, the
    // corresponding entry in C must be found and updated (inserted or
    // modified).  This method does this with a binary search of C(:,jC) or a
    // direct lookup if C(:,jC) is dense.  The time is thus O(nnz(M)*log(n))
    // in the worst case, usually less than that since C(:,jC) often has O(1)
    // entries.  An additional time of O(|J|*log(Cnvec)) is added if C is
    // hypersparse.  There is no equivalent method that computes
    // C(I,J)<M>=scalar using the matrix S.

    // Method 05 and Method 07 are very similar.  Also compare with Method 06n.

    //--------------------------------------------------------------------------
    // Parallel: slice M into coarse/fine tasks (Method 05, 06n, 07)
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_ONE_SLICE (M) ;

    //--------------------------------------------------------------------------
    // phase 1: create zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------

    int taskid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_TASK_DESCRIPTOR_PHASE1 ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // get j, the kth vector of M
            //------------------------------------------------------------------

            // Mh == NULL: M is standard sparse, so vector k is column k itself
            int64_t j = (Mh == NULL) ? k : Mh [k] ;
            GB_GET_VECTOR (pM, pM_end, pA, pA_end, Mp, k) ;
            int64_t mjnz = pM_end - pM ;
            if (mjnz == 0) continue ;

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            GB_GET_jC ;
            int64_t cjnz = pC_end - pC_start ;
            bool cjdense = (cjnz == cvlen) ;

            //------------------------------------------------------------------
            // C(I,jC)<M(:,j)> = scalar ; no S
            //------------------------------------------------------------------

            if (cjdense)
            {

                //--------------------------------------------------------------
                // C(:,jC) is dense so the binary search of C is not needed
                //--------------------------------------------------------------

                for ( ; pM < pM_end ; pM++)
                {

                    //----------------------------------------------------------
                    // update C(iC,jC), but only if M(iA,j) allows it
                    //----------------------------------------------------------

                    if (GB_mcast (Mx, pM, msize))
                    {
                        int64_t iA = Mi [pM] ;
                        GB_iC_DENSE_LOOKUP ;

                        // ----[C A 1] or [X A 1]-------------------------------
                        // [C A 1]: action: ( =A ): copy A into C, no accum
                        // [X A 1]: action: ( undelete ): zombie lives
                        GB_noaccum_C_A_1_scalar ;
                    }
                }
            }
            else
            {

                //--------------------------------------------------------------
                // C(:,jC) is sparse; use binary search for C
                //--------------------------------------------------------------

                for ( ; pM < pM_end ; pM++)
                {

                    //----------------------------------------------------------
                    // update C(iC,jC), but only if M(iA,j) allows it
                    //----------------------------------------------------------

                    if (GB_mcast (Mx, pM, msize))
                    {
                        int64_t iA = Mi [pM] ;

                        // find C(iC,jC) in C(:,jC)
                        GB_iC_BINARY_SEARCH ;

                        if (cij_found)
                        {
                            // ----[C A 1] or [X A 1]---------------------------
                            // [C A 1]: action: ( =A ): copy A into C, no accum
                            // [X A 1]: action: ( undelete ): zombie lives
                            GB_noaccum_C_A_1_scalar ;
                        }
                        else
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            // entry absent in C: counted now, inserted in
                            // phase 2 as a pending tuple
                            task_pending++ ;
                        }
                    }
                }
            }
        }

        GB_PHASE1_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------

    GB_PENDING_CUMSUM ;
    zorig = C->nzombies ;

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(&&:pending_sorted)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_TASK_DESCRIPTOR_PHASE2 ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // get j, the kth vector of M
            //------------------------------------------------------------------

            int64_t j = (Mh == NULL) ? k : Mh [k] ;
            GB_GET_VECTOR (pM, pM_end, pA, pA_end, Mp, k) ;
            int64_t mjnz = pM_end - pM ;
            if (mjnz == 0) continue ;

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            GB_GET_jC ;
            bool cjdense = ((pC_end - pC_start) == cvlen) ;

            //------------------------------------------------------------------
            // C(I,jC)<M(:,j)> = scalar ; no S
            //------------------------------------------------------------------

            // dense vectors were fully handled in phase 1; only sparse ones
            // can have absent entries needing insertion
            if (!cjdense)
            {

                //--------------------------------------------------------------
                // C(:,jC) is sparse; use binary search for C
                //--------------------------------------------------------------

                for ( ; pM < pM_end ; pM++)
                {

                    //----------------------------------------------------------
                    // update C(iC,jC), but only if M(iA,j) allows it
                    //----------------------------------------------------------

                    if (GB_mcast (Mx, pM, msize))
                    {
                        int64_t iA = Mi [pM] ;

                        // find C(iC,jC) in C(:,jC)
                        GB_iC_BINARY_SEARCH ;
                        if (!cij_found)
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            GB_PENDING_INSERT (scalar) ;
                        }
                    }
                }
            }
        }

        GB_PHASE2_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_WRAPUP ;
}
test.c
#include <stdio.h>
#include <omp.h>
#include "../utilities/check.h"
#include "../utilities/utilities.h"

#define TRIALS (1)

#define N (1024*3)
#define M (16*32)

#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})

#define ZERO(X) ZERO_ARRAY(N, X)

double A[M][N], B[M][N], C[N], D[N], E[N];
double S[M];
double p[2];

// Offloading regression test: exercises clause combinations (proc_bind,
// private/shared, firstprivate, lastprivate, collapse, ordered) on
// `omp teams distribute parallel for` with nested `parallel for` regions.
// The NESTED_PARALLEL_FOR macro and VERIFY/TESTD harness come from
// defines.h / the utilities headers; defines.h is re-included after each
// redefinition of NESTED_PARALLEL_FOR_CLAUSES to regenerate the test body
// with the new clause string.
int main(void) {
  check_offloading();

  INIT();

  int cpuExec = 0;
  // Detect whether target regions actually run on the host (no device).
  #pragma omp target map(tofrom: cpuExec)
  {
    cpuExec = omp_is_initial_device();
  }
  int tms = 16;
  int th = 32;
  int threads[1];
  threads[0] = th-1;

  //
  // Test: proc_bind clause
  //
  #undef NESTED_PARALLEL_FOR_CLAUSES
  #define NESTED_PARALLEL_FOR_CLAUSES proc_bind(master)
  #include "defines.h"
  NESTED_PARALLEL_FOR(
    S[idx] = 0; \
    for (int i = 0; i < N; i++) { \
      A[idx][i] = B[idx][i] = 0; \
    },
    for (int i = 0; i < N; i++) { \
      A[idx][i] += C[i] + D[i]; \
      B[idx][i] += D[i] + E[i]; \
    },
    {
      double tmp = 0;
      for (int i = 0; i < N; i++) {
        tmp += A[idx][i] + B[idx][i];
      }
      S[idx] += tmp;
    },
    VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))

  #undef NESTED_PARALLEL_FOR_CLAUSES
  #define NESTED_PARALLEL_FOR_CLAUSES proc_bind(close)
  #include "defines.h"
  NESTED_PARALLEL_FOR(
    S[idx] = 0; \
    for (int i = 0; i < N; i++) { \
      A[idx][i] = B[idx][i] = 0; \
    },
    for (int i = 0; i < N; i++) { \
      A[idx][i] += C[i] + D[i]; \
      B[idx][i] += D[i] + E[i]; \
    },
    {
      double tmp = 0;
      for (int i = 0; i < N; i++) {
        tmp += A[idx][i] + B[idx][i];
      }
      S[idx] += tmp;
    },
    VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))

  #undef NESTED_PARALLEL_FOR_CLAUSES
  #define NESTED_PARALLEL_FOR_CLAUSES proc_bind(spread)
  #include "defines.h"
  NESTED_PARALLEL_FOR(
    S[idx] = 0; \
    for (int i = 0; i < N; i++) { \
      A[idx][i] = B[idx][i] = 0; \
    },
    for (int i = 0; i < N; i++) { \
      A[idx][i] += C[i] + D[i]; \
      B[idx][i] += D[i] + E[i]; \
    },
    {
      double tmp = 0;
      for (int i = 0; i < N; i++) {
        tmp += A[idx][i] + B[idx][i];
      }
      S[idx] += tmp;
    },
    VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))

  //
  // Test: private, shared clauses on omp teams distribute parallel for with nested parallel.
  //
  #undef NESTED_PARALLEL_FOR_CLAUSES
  #define NESTED_PARALLEL_FOR_CLAUSES private(p,q) shared(A,B,C,D,E)
  #include "defines.h"
  NESTED_PARALLEL_FOR(
    double p = 2; \
    double q = 4; \
    S[idx] = 0; \
    for (int i = 0; i < N; i++) { \
      A[idx][i] = B[idx][i] = 0; \
    },
    for (int i = 0; i < N; i++) { \
      p = C[i] + D[i]; \
      q = D[i] + E[i]; \
      A[idx][i] += p; \
      B[idx][i] += q; \
    }
    ,
    {
      double tmp = p + q;
      for (int i = 0; i < N; i++) {
        tmp += A[idx][i] + B[idx][i];
      }
      S[idx] += tmp;
    },
    VERIFY(0, tms*th, S[i], (double) 6 + SUMS * (N/2*(N+1))))

  //
  // Test: firstprivate clause on omp teams distribute parallel for with nested parallel.
  //
  #undef NESTED_PARALLEL_FOR_CLAUSES
  #define NESTED_PARALLEL_FOR_CLAUSES firstprivate(p,q)
  #include "defines.h"
  NESTED_PARALLEL_FOR(
    double p = -4; \
    double q = 4; \
    S[idx] = 0; \
    for (int i = 0; i < N; i++) { \
      A[idx][i] = B[idx][i] = 0; \
    },
    for (int i = 0; i < N; i++) { \
      A[idx][i] += C[i] + D[i] + p; \
      B[idx][i] += D[i] + E[i] + q; \
      if (i == N-1) { \
        p += 6; \
        q += 9; \
      } \
    }
    ,
    {
      double tmp = p + q;
      for (int i = 0; i < N; i++) {
        tmp += A[idx][i] + B[idx][i];
      }
      S[idx] += tmp;
    },
    VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))

  //
  // Test: lastprivate clause on omp teams distribute parallel for with nested parallel.
  //
  // Hand-rolled (TESTD) rather than macro-generated: each schedule kind is
  // checked with a lastprivate scalar array q0..q3.
  TESTD("omp target",
  {
    _Pragma("omp teams distribute parallel for num_teams(tms) num_threads(th)")
    for (int idx = 0; idx < tms*th; idx++) {
      double q0[1];
      double q1[1];
      double q2[1];
      double q3[1];
      S[idx] = 0;
      for (int i = 0; i < N; i++) {
        A[idx][i] = B[idx][i] = 0;
      }
      _Pragma("omp parallel for lastprivate(q0) if(threads[0] > 1) num_threads(threads[0])")
      for (int i = 0; i < N; i++) {
        q0[0] = C[i] + D[i];
        A[idx][i] += q0[0];
      }
      _Pragma("omp parallel for schedule(auto) lastprivate(q1) if(threads[0] > 1) num_threads(threads[0])")
      for (int i = 0; i < N; i++) {
        q1[0] = C[i] + D[i];
        A[idx][i] += q1[0];
      }
      _Pragma("omp parallel for schedule(static) lastprivate(q2) if(threads[0] > 1) num_threads(threads[0])")
      for (int i = 0; i < N; i++) {
        q2[0] = D[i] + E[i];
        B[idx][i] += q2[0];
      }
      _Pragma("omp parallel for schedule(static,9) lastprivate(q3) if(threads[0] > 1) num_threads(threads[0])")
      for (int i = 0; i < N; i++) {
        q3[0] = D[i] + E[i];
        B[idx][i] += q3[0];
      }
      double tmp = q0[0] + q1[0] + q2[0] + q3[0];
      for (int i = 0; i < N; i++) {
        tmp += A[idx][i] + B[idx][i];
      }
      S[idx] += tmp;
    }
  }, VERIFY(0, tms*th, S[i], (double) 2 * (N + (N/2*(N+1))) ));

  //
  // Test: private clause on omp teams distribute parallel for with nested parallel.
  //
  #undef NESTED_PARALLEL_FOR_CLAUSES
  #define NESTED_PARALLEL_FOR_CLAUSES private(p)
  #include "defines.h"
  NESTED_PARALLEL_FOR(
    double p[2]; \
    p[0] = 2; p[1] = 4; \
    S[idx] = 0; \
    for (int i = 0; i < N; i++) { \
      A[idx][i] = B[idx][i] = 0; \
    }
    ,
    for (int i = 0; i < N; i++) { \
      p[0] = C[i] + D[i]; \
      p[1] = D[i] + E[i]; \
      A[idx][i] += p[0]; \
      B[idx][i] += p[1]; \
    }
    ,
    {
      double tmp = p[0] + p[1];
      for (int i = 0; i < N; i++) {
        tmp += A[idx][i] + B[idx][i];
      }
      S[idx] += tmp;
    },
    VERIFY(0, tms*th, S[i], (double) 6 + SUMS * (N/2*(N+1))))

  //
  // Test: firstprivate clause on omp teams distribute parallel for with nested parallel.
  //
  #undef NESTED_PARALLEL_FOR_CLAUSES
  #define NESTED_PARALLEL_FOR_CLAUSES firstprivate(p)
  #include "defines.h"
  NESTED_PARALLEL_FOR(
    double p[2]; \
    p[0] = -4; p[1] = 4; \
    S[idx] = 0; \
    for (int i = 0; i < N; i++) { \
      A[idx][i] = B[idx][i] = 0; \
    }
    ,
    for (int i = 0; i < N; i++) { \
      A[idx][i] += C[i] + D[i] + p[0]; \
      B[idx][i] += D[i] + E[i] + p[1]; \
      if (i == N-1) { \
        p[0] += 6; \
        p[1] += 9; \
      } \
    }
    ,
    {
      double tmp = p[0] + p[1];
      for (int i = 0; i < N; i++) {
        tmp += A[idx][i] + B[idx][i];
      }
      S[idx] += tmp;
    },
    VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))

  //
  // Test: collapse clause on omp teams distribute parallel for with nested parallel.
  //
  #undef NESTED_PARALLEL_FOR_CLAUSES
  #define NESTED_PARALLEL_FOR_CLAUSES collapse(2)
  #include "defines.h"
  NESTED_PARALLEL_FOR(
    S[idx] = 0; \
    for (int i = 0; i < N; i++) { \
      A[idx][i] = B[idx][i] = 0; \
    }
    ,
    for (int i = 0; i < 1024; i++) { \
      for (int j = 0; j < 3; j++) { \
        A[idx][i*3+j] += C[i*3+j] + D[i*3+j]; \
        B[idx][i*3+j] += D[i*3+j] + E[i*3+j]; \
      } \
    }
    ,
    {
      double tmp = 0;
      for (int i = 0; i < N; i++) {
        tmp += A[idx][i] + B[idx][i];
      }
      S[idx] += tmp;
    },
    VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))

  //
  // Test: ordered clause on omp teams distribute parallel for with nested parallel.
  //
  #undef NESTED_PARALLEL_FOR_CLAUSES
  #define NESTED_PARALLEL_FOR_CLAUSES ordered
  #include "defines.h"
  NESTED_PARALLEL_FOR(
    S[idx] = 0; \
    ,
    for (int i = 0; i < N; i++) { \
      _Pragma("omp ordered") \
      S[idx] += C[i] + D[i]; \
    }
    ,
    {
    },
    VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))

  //
  // Test: Ensure coalesced scheduling on GPU.
  //
  // Only meaningful on a device: with coalesced (chunk-1 static) scheduling
  // thread t of 32 executes iterations t, t+32, t+64, so i minus the thread
  // number telescopes to the verified constant.
  if (!cpuExec) {
    TESTD("omp target",
    {
      _Pragma("omp teams distribute parallel for num_teams(tms) num_threads(th)")
      for (int idx = 0; idx < tms*th; idx++) {
        S[idx] = 0;
        for (int i = 0; i < 96; i++) {
          A[idx][i] = 0;
        }
        _Pragma("omp parallel for num_threads(32)")
        for (int i = 0; i < 96; i++) {
          A[idx][i] += i - omp_get_thread_num();
        }
        _Pragma("omp parallel for schedule(auto) num_threads(32)")
        for (int i = 0; i < 96; i++) {
          A[idx][i] += i - omp_get_thread_num();
        }
        _Pragma("omp parallel for schedule(static,1) num_threads(32)")
        for (int i = 0; i < 96; i++) {
          A[idx][i] += i - omp_get_thread_num();
        }
        double tmp = 0;
        for (int i = 0; i < 96; i++) {
          tmp += A[idx][i];
        }
        S[idx] = tmp;
      }
    }, VERIFY(0, tms*th, S[i], (double) 3 * (32*32 + 64*32) ));
  } else {
    DUMP_SUCCESS(1);
  }

  return 0;
}
kernel_cpu.c
// #ifdef __cplusplus // extern "C" { // #endif //========================================================================================================================================================================================================200 // DEFINE/INCLUDE //========================================================================================================================================================================================================200 //======================================================================================================================================================150 // LIBRARIES //======================================================================================================================================================150 #ifdef _OPENMP #include <omp.h> #endif // (in directory known to compiler) needed by openmp #include <stdio.h> // (in directory known to compiler) needed by printf, stderr #include <stdlib.h> // (in directory known to compiler) needed by malloc //======================================================================================================================================================150 // COMMON //======================================================================================================================================================150 #include "../common.h" // (in directory provided here) //======================================================================================================================================================150 // UTILITIES //======================================================================================================================================================150 #include "../util/timer/timer.h" // (in directory provided here) 
//========================================================================================================================================================================================================200 // KERNEL_CPU FUNCTION //========================================================================================================================================================================================================200 void kernel_gpu(int cores_arg, record *records, knode *knodes, long knodes_elem, long records_elem, int order, long maxheight, int count, long *currKnode, long *offset, int *keys, record *ans) { //======================================================================================================================================================150 // MCPU SETUP //======================================================================================================================================================150 int max_nthreads; #ifdef _OPENMP max_nthreads = omp_get_max_threads(); // printf("max # of threads = %d\n", max_nthreads); omp_set_num_threads(cores_arg); // printf("set # of threads = %d\n", cores_arg); #endif int threadsPerBlock; threadsPerBlock = order < 1024 ? 
order : 1024; //======================================================================================================================================================150 // PROCESS INTERACTIONS //======================================================================================================================================================150 // private thread IDs int thid; int bid; int i; int x = 100; int *A; A = (int *)malloc(sizeof(int) * x); // process number of querries #pragma omp target map( \ to : keys[ : count], \ knodes[ : knodes_elem], records[ : records_elem]) \ map(tofrom : offset[ : count], \ ans[ : count], currKnode[ : count]) { #pragma omp teams distribute parallel for private(i, thid) for (bid = 0; bid < count; bid++) { // process levels of the tree for (i = 0; i < maxheight; i++) { // process all leaves at each level for (thid = 0; thid < threadsPerBlock; thid++) { // if value is between the two keys if ((knodes[currKnode[bid]].keys[thid]) <= keys[bid] && (knodes[currKnode[bid]].keys[thid + 1] > keys[bid])) { // this conditional statement is inserted to avoid crush due to but // in original code // "offset[bid]" calculated below that addresses knodes[] in the // next iteration goes outside of its bounds cause segmentation // fault // more specifically, values saved into knodes->indices in the main // function are out of bounds of knodes that they address if (knodes[offset[bid]].indices[thid] < knodes_elem) { offset[bid] = knodes[offset[bid]].indices[thid]; } } } // set for next tree level currKnode[bid] = offset[bid]; } // At this point, we have a candidate leaf node which may contain // the target record. 
Check each key to hopefully find the record // process all leaves at each level for (thid = 0; thid < threadsPerBlock; thid++) { if (knodes[currKnode[bid]].keys[thid] == keys[bid]) { ans[bid].value = records[knodes[currKnode[bid]].indices[thid]].value; } } } } } void kernel_cpu(int cores_arg, record *records, knode *knodes, long knodes_elem, long records_elem, int order, long maxheight, int count, long *currKnode, long *offset, int *keys, record *ans) { //======================================================================================================================================================150 // MCPU SETUP //======================================================================================================================================================150 int max_nthreads; #ifdef _OPENMP max_nthreads = omp_get_max_threads(); // printf("max # of threads = %d\n", max_nthreads); omp_set_num_threads(cores_arg); // printf("set # of threads = %d\n", cores_arg); #endif int threadsPerBlock; threadsPerBlock = order < 1024 ? 
order : 1024; //======================================================================================================================================================150 // PROCESS INTERACTIONS //======================================================================================================================================================150 // private thread IDs int thid; int bid; int i; int x = 100; int *A; A = (int *)malloc(sizeof(int) * x); // process number of querries for (bid = 0; bid < count; bid++) { // process levels of the tree for (i = 0; i < maxheight; i++) { // process all leaves at each level for (thid = 0; thid < threadsPerBlock; thid++) { // if value is between the two keys if ((knodes[currKnode[bid]].keys[thid]) <= keys[bid] && (knodes[currKnode[bid]].keys[thid + 1] > keys[bid])) { // this conditional statement is inserted to avoid crush due to but in // original code // "offset[bid]" calculated below that addresses knodes[] in the next // iteration goes outside of its bounds cause segmentation fault // more specifically, values saved into knodes->indices in the main // function are out of bounds of knodes that they address if (knodes[offset[bid]].indices[thid] < knodes_elem) { offset[bid] = knodes[offset[bid]].indices[thid]; } } } // set for next tree level currKnode[bid] = offset[bid]; } // At this point, we have a candidate leaf node which may contain // the target record. Check each key to hopefully find the record // process all leaves at each level for (thid = 0; thid < threadsPerBlock; thid++) { if (knodes[currKnode[bid]].keys[thid] == keys[bid]) { ans[bid].value = records[knodes[currKnode[bid]].indices[thid]].value; } } } }
2.norace6.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s #include <omp.h> #define N 200 int main() { int A[N], x = 0; #pragma omp simd linear(x : 2) for (int i = 0; i < N; i++) A[i] = x; } // CHECK: Region is Data Race Free. // END
GeneralMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H

namespace Eigen {

namespace internal {

template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;

/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
  int ResInnerStride>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor,ResInnerStride>
{
  typedef gebp_traits<RhsScalar,LhsScalar> Traits;

  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  static EIGEN_STRONG_INLINE void run(
    Index rows, Index cols, Index depth,
    const LhsScalar* lhs, Index lhsStride,
    const RhsScalar* rhs, Index rhsStride,
    ResScalar* res, Index resIncr, Index resStride,
    ResScalar alpha,
    level3_blocking<RhsScalar,LhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    // transpose the product such that the result is column major:
    // C = A*B (row-major C) is computed as C^T = B^T * A^T (col-major),
    // swapping lhs/rhs and rows/cols and flipping each storage order
    general_matrix_matrix_product<Index,
      RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
      LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
      ColMajor,ResInnerStride>
    ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resIncr,resStride,alpha,blocking,info);
  }
};

/* Specialization for a col-major destination matrix
 * => Blocking algorithm following Goto's paper */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
  int ResInnerStride>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor,ResInnerStride>
{

typedef gebp_traits<LhsScalar,RhsScalar> Traits;

typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static void run(Index rows, Index cols, Index depth,
  const LhsScalar* _lhs, Index lhsStride,
  const RhsScalar* _rhs, Index rhsStride,
  ResScalar* _res, Index resIncr, Index resStride,
  ResScalar alpha,
  level3_blocking<LhsScalar,RhsScalar>& blocking,
  GemmParallelInfo<Index>* info = 0)
{
  typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
  typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
  typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor,Unaligned,ResInnerStride> ResMapper;
  LhsMapper lhs(_lhs, lhsStride);
  RhsMapper rhs(_rhs, rhsStride);
  ResMapper res(_res, resStride, resIncr);

  Index kc = blocking.kc();                   // cache block size along the K direction
  Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction
  Index nc = (std::min)(cols,blocking.nc());  // cache block size along the N direction

  gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
  gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
  gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;

#ifdef EIGEN_HAS_OPENMP
  if(info)
  {
    // this is the parallel version!
    // NOTE(review): threads coordinate through the shared info[] array using
    // busy-wait loops on info[..].users / info[..].sync (no locks).
    int tid = omp_get_thread_num();
    int threads = omp_get_num_threads();

    LhsScalar* blockA = blocking.blockA();
    eigen_internal_assert(blockA!=0);

    std::size_t sizeB = kc*nc;
    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);

    // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
    for(Index k=0; k<depth; k+=kc)
    {
      const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'

      // In order to reduce the chance that a thread has to wait for the other,
      // let's start by packing B'.
      pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);

      // Pack A_k to A' in a parallel fashion:
      // each thread packs the sub block A_k,i to A'_i where i is the thread id.

      // However, before copying to A'_i, we have to make sure that no other thread is still using it,
      // i.e., we test that info[tid].users equals 0.
      // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
      while(info[tid].users!=0) {}
      info[tid].users += threads;

      pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);

      // Notify the other threads that the part A'_i is ready to go.
      info[tid].sync = k;

      // Computes C_i += A' * B' per A'_i
      // (starting at our own sub-block, then rotating through the others)
      for(int shift=0; shift<threads; ++shift)
      {
        int i = (tid+shift)%threads;

        // At this point we have to make sure that A'_i has been updated by the thread i,
        // we use testAndSetOrdered to mimic a volatile access.
        // However, no need to wait for the B' part which has been updated by the current thread!
        if (shift>0) {
          while(info[i].sync!=k) {
          }
        }

        gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
      }

      // Then keep going as usual with the remaining B'
      for(Index j=nc; j<cols; j+=nc)
      {
        const Index actual_nc = (std::min)(j+nc,cols)-j;

        // pack B_k,j to B'
        pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);

        // C_j += A' * B'
        gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);
      }

      // Release all the sub blocks A'_i of A' for the current thread,
      // i.e., we simply decrement the number of users by 1
      for(Index i=0; i<threads; ++i)
#pragma omp atomic
        info[i].users -= 1;
    }
  }
  else
#endif // EIGEN_HAS_OPENMP
  {
    EIGEN_UNUSED_VARIABLE(info);

    // this is the sequential version!
    std::size_t sizeA = kc*mc;
    std::size_t sizeB = kc*nc;

    ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());

    const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols;

    // For each horizontal panel of the rhs, and corresponding panel of the lhs...
    for(Index i2=0; i2<rows; i2+=mc)
    {
      const Index actual_mc = (std::min)(i2+mc,rows)-i2;

      for(Index k2=0; k2<depth; k2+=kc)
      {
        const Index actual_kc = (std::min)(k2+kc,depth)-k2;

        // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
        // => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
        // Note that this panel will be read as many times as the number of blocks in the rhs's
        // horizontal panel which is, in practice, a very low number.
        pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);

        // For each kc x nc block of the rhs's horizontal panel...
        for(Index j2=0; j2<cols; j2+=nc)
        {
          const Index actual_nc = (std::min)(j2+nc,cols)-j2;

          // We pack the rhs's block into a sequential chunk of memory (L2 caching)
          // Note that this block will be read a very high number of times, which is equal to the number of
          // micro horizontal panel of the large rhs's panel (e.g., rows/12 times).
          if((!pack_rhs_once) || i2==0)
            pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);

          // Everything is packed, we can now call the panel * block kernel:
          gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
        }
      }
    }
  }
}

};

/*********************************************************************************
*  Specialization of generic_product_impl for "large" GEMM, i.e.,
*  implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/

// Callable object binding one GEMM invocation (operands, destination, alpha,
// blocking) so that parallelize_gemm can dispatch row ranges to threads.
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
  {}

  // Pre-compute blocking sizes and allocate the shared A buffer once for all threads.
  void initParallelSession(Index num_threads) const
  {
    m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
    m_blocking.allocateA();
  }

  // Run the product on the [row, row+rows) x [col, col+cols) slice of the destination.
  void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
  {
    if(cols==-1)
      cols = m_rhs.cols();

    Gemm::run(rows, cols, m_lhs.cols(),
              &m_lhs.coeffRef(row,0), m_lhs.outerStride(),
              &m_rhs.coeffRef(0,col), m_rhs.outerStride(),
              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.innerStride(), m_dest.outerStride(),
              m_actualAlpha, m_blocking, info);
  }

  typedef typename Gemm::Traits Traits;

  protected:
    const Lhs& m_lhs;
    const Rhs& m_rhs;
    Dest& m_dest;
    Scalar m_actualAlpha;
    BlockingType& m_blocking;
};

template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;

// Base class holding the cache-blocking sizes (mc/nc/kc) and the packed
// A/B buffers shared by the blocking-space specializations below.
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
    typedef _LhsScalar LhsScalar;
    typedef _RhsScalar RhsScalar;

  protected:
    LhsScalar* m_blockA;
    RhsScalar* m_blockB;

    Index m_mc;
    Index m_nc;
    Index m_kc;

  public:

    level3_blocking()
      : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)
    {}

    inline Index mc() const { return m_mc; }
    inline Index nc() const { return m_nc; }
    inline Index kc() const { return m_kc; }

    inline LhsScalar* blockA() { return m_blockA; }
    inline RhsScalar* blockB() { return m_blockB; }
};

// Fixed-size variant: all dimensions known at compile time, so the packed
// buffers live in static (member) storage and no heap allocation happens.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor,
      ActualRows = Transpose ? MaxCols : MaxRows,
      ActualCols = Transpose ? MaxRows : MaxCols
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
    enum {
      SizeA = ActualRows * MaxDepth,
      SizeB = ActualCols * MaxDepth
    };

#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
    EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA];
    EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB];
#else
    // over-allocate raw bytes and align the pointer manually below
    EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
    EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
#endif

  public:

    gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
    {
      this->m_mc = ActualRows;
      this->m_nc = ActualCols;
      this->m_kc = MaxDepth;
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
      this->m_blockA = m_staticA;
      this->m_blockB = m_staticB;
#else
      this->m_blockA = reinterpret_cast<LhsScalar*>((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
      this->m_blockB = reinterpret_cast<RhsScalar*>((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
#endif
    }

    void initParallel(Index, Index, Index, Index)
    {}

    inline void allocateA() {}
    inline void allocateB() {}
    inline void allocateAll() {}
};

// Dynamic-size variant: blocking sizes are computed at run time and the
// packed buffers are heap-allocated lazily (allocateA/allocateB).
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;

    Index m_sizeA;
    Index m_sizeB;

  public:

    gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      if(l3_blocking)
      {
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
      }
      else  // no l3 blocking
      {
        // pass a copy of m_nc so only kc/mc are shrunk by the heuristic
        Index n = this->m_nc;
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n, num_threads);
      }

      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    void initParallel(Index rows, Index cols, Index depth, Index num_threads)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);
      Index m = this->m_mc;
      computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    void allocateA()
    {
      if(this->m_blockA==0)
        this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
    }

    void allocateB()
    {
      if(this->m_blockB==0)
        this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
    }

    void allocateAll()
    {
      allocateA();
      allocateB();
    }

    ~gemm_blocking_space()
    {
      aligned_delete(this->m_blockA, m_sizeA);
      aligned_delete(this->m_blockB, m_sizeB);
    }
};

} // end namespace internal

namespace internal {

// High-level entry point for "large" dense*dense products: extracts scalar
// factors, sets up blocking, and dispatches to the blocked GEMM kernel,
// falling back to the lazy coefficient-based product for tiny operands.
template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;
  typedef typename Lhs::Scalar LhsScalar;
  typedef typename Rhs::Scalar RhsScalar;

  typedef internal::blas_traits<Lhs> LhsBlasTraits;
  typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
  typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;

  typedef internal::blas_traits<Rhs> RhsBlasTraits;
  typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
  typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;

  enum {
    MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
  };

  typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;

  template<typename Dst>
  static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    // small products (combined extent < 20) use the lazy product instead
    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
      lazyproduct::eval_dynamic(dst, lhs, rhs, internal::assign_op<typename Dst::Scalar,Scalar>());
    else
    {
      dst.setZero();
      scaleAndAddTo(dst, lhs, rhs, Scalar(1));
    }
  }

  template<typename Dst>
  static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
      lazyproduct::eval_dynamic(dst, lhs, rhs, internal::add_assign_op<typename Dst::Scalar,Scalar>());
    else
      scaleAndAddTo(dst,lhs, rhs, Scalar(1));
  }

  template<typename Dst>
  static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
      lazyproduct::eval_dynamic(dst, lhs, rhs, internal::sub_assign_op<typename Dst::Scalar,Scalar>());
    else
      scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
  }

  // dst += alpha * lhs * rhs, the workhorse shared by the methods above
  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha)
  {
    eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
    if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
      return;

    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);

    // fold any scalar multiples wrapped around the operands into alpha
    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
                               * RhsBlasTraits::extractScalarFactor(a_rhs);

    typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
            Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;

    typedef internal::gemm_functor<
      Scalar, Index,
      internal::general_matrix_matrix_product<
        Index,
        LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
        RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
        (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor, Dest::InnerStrideAtCompileTime>,
      ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;

    BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);
    internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>
        (GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(), Dest::Flags&RowMajorBit);
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
ab-totient-omp-10.c
// Distributed and parallel technologies, Andrew Beveridge, 03/03/2014
// To Compile: gcc -Wall -O -o ab-totient-omp -fopenmp ab-totient-omp.c
// To Run / Time: /usr/bin/time -v ./ab-totient-omp range_start range_end

#include <stdio.h>
#include <omp.h>

/*
   When input is a prime number, the totient is simply the prime number - 1.
   Totient is always even (except for 1).
   If n is a positive integer, then phi(n) is the number of integers k in the
   range 1 <= k <= n for which gcd(n, k) = 1.
*/

// Compute Euler's totient of `number` by trial division: for each prime
// factor p found, apply result -= result / p (i.e. multiply by 1 - 1/p)
// and strip every power of p from `number`.
long getTotient(long number) {
    long result = number;

    // Handle the only even prime separately so the loop below can step by 2.
    if (number % 2 == 0) {
        result -= result / 2;
        do
            number /= 2;
        while (number % 2 == 0);
    }

    // Trial-divide by every odd candidate up to sqrt(number); only primes can
    // still divide `number` here because all smaller factors were removed.
    long prime;
    for (prime = 3; prime * prime <= number; prime += 2) {
        if (number % prime == 0) {
            result -= result / prime;
            do
                number /= prime;
            while (number % prime == 0);
        }
    }

    // Whatever remains (> 1) is a single prime factor larger than sqrt(n).
    if (number > 1)
        result -= result / number;

    // Return the result.
    return result;
}

// Sum the totients of every integer in [range_start, range_end] in parallel.
int main(int argc, char **argv) {

    // Validate the command line before touching argv[1]/argv[2]
    // (the original dereferenced them unconditionally).
    if (argc < 3) {
        fprintf(stderr, "Usage: %s range_start range_end\n", argv[0]);
        return 1;
    }

    long lower, upper;
    if (sscanf(argv[1], "%ld", &lower) != 1 ||
        sscanf(argv[2], "%ld", &upper) != 1) {
        fprintf(stderr, "Error: range bounds must be integers\n");
        return 1;
    }

    // `i` must be long, not int: an int index silently truncates/overflows
    // when the range extends beyond INT_MAX.  `result` is a long, so it is
    // initialized with integer literals (the original used 0.0 / 1.0).
    long i;
    long result = 0;

    // phi(1) = 1 by convention; handle it here so getTotient is never
    // called with 1.
    if (lower == 1) {
        result = 1;
        lower = 2;
    }

#pragma omp parallel for default(shared) private(i) schedule(auto) reduction(+:result) num_threads(10)
    // Sum all totients in the specified range
    for (i = lower; i <= upper; i++) {
        result = result + getTotient(i);
    }

    // Print the result
    printf("Sum of Totients between [%ld..%ld] is %ld \n", lower, upper, result);

    // A-OK!
    return 0;
}
GB_unop__acosh_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function:  GB (_unop_apply__acosh_fc32_fc32)
// op(A') function: GB (_unop_tran__acosh_fc32_fc32)

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = cacoshf (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = cacoshf (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ; \
    Cx [pC] = cacoshf (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ACOSH || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply complex acosh entrywise to the anz entries of Ax, writing to Cx.
// Ab (if non-NULL) is the bitmap of A: entries with Ab [p] == 0 are skipped.
GrB_Info GB (_unop_apply__acosh_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: apply the operator to every entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = cacoshf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = cacoshf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is shared across all generated operators via the
// GB_unop_transpose.c include, which expands using the GB_* macros above.
GrB_Info GB (_unop_tran__acosh_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__identity_uint8_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_uint8_uint8
// op(A') function:  GB_tran__identity_uint8_uint8

// C type:   uint8_t
// A type:   uint8_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = aij

// A and C entry type
#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: identity (z = x)
#define GB_OP(z, x) \
    z = x ;

// casting (uint8 -> uint8; a no-op for this instantiation)
#define GB_CASTING(z, aij) \
    uint8_t z = (uint8_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Copy the anz entries of Ax into Cx (identity operator, no type change).
// Cx and Ax may be aliased; each output element depends only on the same
// input position.  Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB_unop__identity_uint8_uint8
(
    uint8_t *Cx,       // Cx and Ax may be aliased
    uint8_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body lives in GB_unaryop_transpose.c, textually
// included and specialized through the GB_* macros above.
GrB_Info GB_tran__identity_uint8_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ==== file: _phonopy.c ==== */
/* Copyright (C) 2011 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/ #include <Python.h> #include <float.h> #include <math.h> #include <numpy/arrayobject.h> #include <stddef.h> #include <stdio.h> #include "phonopy.h" /* PHPYCONST is defined in dynmat.h */ /* Build dynamical matrix */ static PyObject* py_transform_dynmat_to_fc(PyObject* self, PyObject* args); static PyObject* py_perm_trans_symmetrize_fc(PyObject* self, PyObject* args); static PyObject* py_perm_trans_symmetrize_compact_fc(PyObject* self, PyObject* args); static PyObject* py_transpose_compact_fc(PyObject* self, PyObject* args); static PyObject* py_get_dynamical_matrix(PyObject* self, PyObject* args); static PyObject* py_get_nac_dynamical_matrix(PyObject* self, PyObject* args); static PyObject* py_get_recip_dipole_dipole(PyObject* self, PyObject* args); static PyObject* py_get_recip_dipole_dipole_q0(PyObject* self, PyObject* args); static PyObject* py_get_derivative_dynmat(PyObject* self, PyObject* args); static PyObject* py_get_thermal_properties(PyObject* self, PyObject* args); static PyObject* py_distribute_fc2(PyObject* self, PyObject* args); static PyObject* py_compute_permutation(PyObject* self, PyObject* args); static PyObject* py_gsv_set_smallest_vectors_sparse(PyObject* self, PyObject* args); static PyObject* py_gsv_set_smallest_vectors_dense(PyObject* self, PyObject* args); static PyObject* py_thm_relative_grid_address(PyObject* self, PyObject* args); static PyObject* py_thm_all_relative_grid_address(PyObject* self, PyObject* args); static PyObject* py_thm_integration_weight(PyObject* self, PyObject* args); static PyObject* py_thm_integration_weight_at_omegas(PyObject* self, PyObject* args); static PyObject* py_get_tetrahedra_frequenies(PyObject* self, PyObject* args); static PyObject* py_tetrahedron_method_dos(PyObject* self, PyObject* args); struct module_state { PyObject* error; }; #if PY_MAJOR_VERSION >= 3 #define GETSTATE(m) ((struct module_state*)PyModule_GetState(m)) #else #define GETSTATE(m) (&_state) static struct module_state _state; #endif static 
PyObject* error_out(PyObject* m) { struct module_state* st = GETSTATE(m); PyErr_SetString(st->error, "something bad happened"); return NULL; } static PyMethodDef _phonopy_methods[] = { {"error_out", (PyCFunction)error_out, METH_NOARGS, NULL}, {"transform_dynmat_to_fc", py_transform_dynmat_to_fc, METH_VARARGS, "Transform a set of dynmat to force constants"}, {"perm_trans_symmetrize_fc", py_perm_trans_symmetrize_fc, METH_VARARGS, "Enforce permutation and translational symmetry of force constants"}, {"perm_trans_symmetrize_compact_fc", py_perm_trans_symmetrize_compact_fc, METH_VARARGS, "Enforce permutation and translational symmetry of compact force " "constants"}, {"transpose_compact_fc", py_transpose_compact_fc, METH_VARARGS, "Transpose compact force constants"}, {"dynamical_matrix", py_get_dynamical_matrix, METH_VARARGS, "Dynamical matrix"}, {"nac_dynamical_matrix", py_get_nac_dynamical_matrix, METH_VARARGS, "NAC dynamical matrix"}, {"recip_dipole_dipole", py_get_recip_dipole_dipole, METH_VARARGS, "Reciprocal part of dipole-dipole interaction"}, {"recip_dipole_dipole_q0", py_get_recip_dipole_dipole_q0, METH_VARARGS, "q=0 terms of reciprocal part of dipole-dipole interaction"}, {"derivative_dynmat", py_get_derivative_dynmat, METH_VARARGS, "Q derivative of dynamical matrix"}, {"thermal_properties", py_get_thermal_properties, METH_VARARGS, "Thermal properties"}, {"distribute_fc2", py_distribute_fc2, METH_VARARGS, "Distribute force constants for all atoms in atom_list using precomputed " "symmetry mappings."}, {"compute_permutation", py_compute_permutation, METH_VARARGS, "Compute indices of original points in a set of rotated points."}, {"gsv_set_smallest_vectors_sparse", py_gsv_set_smallest_vectors_sparse, METH_VARARGS, "Set shortest vectors in sparse array."}, {"gsv_set_smallest_vectors_dense", py_gsv_set_smallest_vectors_dense, METH_VARARGS, "Set shortest vectors in dense array."}, {"tetrahedra_relative_grid_address", py_thm_relative_grid_address, METH_VARARGS, 
"Relative grid addresses of vertices of 24 tetrahedra"}, {"all_tetrahedra_relative_grid_address", py_thm_all_relative_grid_address, METH_VARARGS, "4 (all) sets of relative grid addresses of vertices of 24 tetrahedra"}, {"tetrahedra_integration_weight", py_thm_integration_weight, METH_VARARGS, "Integration weight for tetrahedron method"}, {"tetrahedra_integration_weight_at_omegas", py_thm_integration_weight_at_omegas, METH_VARARGS, "Integration weight for tetrahedron method at omegas"}, {"tetrahedra_frequencies", py_get_tetrahedra_frequenies, METH_VARARGS, "Run tetrahedron method"}, {"tetrahedron_method_dos", py_tetrahedron_method_dos, METH_VARARGS, "Run tetrahedron method"}, {NULL, NULL, 0, NULL}}; #if PY_MAJOR_VERSION >= 3 static int _phonopy_traverse(PyObject* m, visitproc visit, void* arg) { Py_VISIT(GETSTATE(m)->error); return 0; } static int _phonopy_clear(PyObject* m) { Py_CLEAR(GETSTATE(m)->error); return 0; } static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "_phonopy", NULL, sizeof(struct module_state), _phonopy_methods, NULL, _phonopy_traverse, _phonopy_clear, NULL}; #define INITERROR return NULL PyObject* PyInit__phonopy(void) #else #define INITERROR return void init_phonopy(void) #endif { #if PY_MAJOR_VERSION >= 3 PyObject* module = PyModule_Create(&moduledef); #else PyObject* module = Py_InitModule("_phonopy", _phonopy_methods); #endif struct module_state* st; if (module == NULL) INITERROR; st = GETSTATE(module); st->error = PyErr_NewException("_phonopy.Error", NULL, NULL); if (st->error == NULL) { Py_DECREF(module); INITERROR; } #if PY_MAJOR_VERSION >= 3 return module; #endif } static PyObject* py_transform_dynmat_to_fc(PyObject* self, PyObject* args) { PyArrayObject* py_force_constants; PyArrayObject* py_dynamical_matrices; PyArrayObject* py_commensurate_points; PyArrayObject* py_svecs; PyArrayObject* py_multi; PyArrayObject* py_masses; PyArrayObject* py_s2pp_map; PyArrayObject* py_fc_index_map; double* fc; double* dm; 
double(*comm_points)[3]; double(*svecs)[3]; double* masses; long(*multi)[2]; long* s2pp_map; long* fc_index_map; long num_patom; long num_satom; if (!PyArg_ParseTuple(args, "OOOOOOOO", &py_force_constants, &py_dynamical_matrices, &py_commensurate_points, &py_svecs, &py_multi, &py_masses, &py_s2pp_map, &py_fc_index_map)) { return NULL; } fc = (double*)PyArray_DATA(py_force_constants); dm = (double*)PyArray_DATA(py_dynamical_matrices); comm_points = (double(*)[3])PyArray_DATA(py_commensurate_points); svecs = (double(*)[3])PyArray_DATA(py_svecs); masses = (double*)PyArray_DATA(py_masses); multi = (long(*)[2])PyArray_DATA(py_multi); s2pp_map = (long*)PyArray_DATA(py_s2pp_map); fc_index_map = (long*)PyArray_DATA(py_fc_index_map); num_patom = PyArray_DIMS(py_multi)[1]; num_satom = PyArray_DIMS(py_multi)[0]; phpy_transform_dynmat_to_fc(fc, dm, comm_points, svecs, multi, masses, s2pp_map, fc_index_map, num_patom, num_satom); Py_RETURN_NONE; } static PyObject* py_compute_permutation(PyObject* self, PyObject* args) { PyArrayObject* permutation; PyArrayObject* lattice; PyArrayObject* positions; PyArrayObject* permuted_positions; double symprec; int* rot_atoms; double(*lat)[3]; double(*pos)[3]; double(*rot_pos)[3]; int num_pos; int is_found; if (!PyArg_ParseTuple(args, "OOOOd", &permutation, &lattice, &positions, &permuted_positions, &symprec)) { return NULL; } rot_atoms = (int*)PyArray_DATA(permutation); lat = (double(*)[3])PyArray_DATA(lattice); pos = (double(*)[3])PyArray_DATA(positions); rot_pos = (double(*)[3])PyArray_DATA(permuted_positions); num_pos = PyArray_DIMS(positions)[0]; is_found = phpy_compute_permutation(rot_atoms, lat, pos, rot_pos, num_pos, symprec); if (is_found) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } static PyObject* py_gsv_set_smallest_vectors_sparse(PyObject* self, PyObject* args) { PyArrayObject* py_smallest_vectors; PyArrayObject* py_multiplicity; PyArrayObject* py_pos_to; PyArrayObject* py_pos_from; PyArrayObject* py_lattice_points; 
PyArrayObject* py_reduced_basis; PyArrayObject* py_trans_mat; double symprec; double(*smallest_vectors)[27][3]; int* multiplicity; double(*pos_to)[3]; double(*pos_from)[3]; int(*lattice_points)[3]; double(*reduced_basis)[3]; int(*trans_mat)[3]; int num_pos_to, num_pos_from, num_lattice_points; if (!PyArg_ParseTuple(args, "OOOOOOOd", &py_smallest_vectors, &py_multiplicity, &py_pos_to, &py_pos_from, &py_lattice_points, &py_reduced_basis, &py_trans_mat, &symprec)) { return NULL; } smallest_vectors = (double(*)[27][3])PyArray_DATA(py_smallest_vectors); multiplicity = (int*)PyArray_DATA(py_multiplicity); pos_to = (double(*)[3])PyArray_DATA(py_pos_to); pos_from = (double(*)[3])PyArray_DATA(py_pos_from); num_pos_to = PyArray_DIMS(py_pos_to)[0]; num_pos_from = PyArray_DIMS(py_pos_from)[0]; lattice_points = (int(*)[3])PyArray_DATA(py_lattice_points); num_lattice_points = PyArray_DIMS(py_lattice_points)[0]; reduced_basis = (double(*)[3])PyArray_DATA(py_reduced_basis); trans_mat = (int(*)[3])PyArray_DATA(py_trans_mat); phpy_set_smallest_vectors_sparse(smallest_vectors, multiplicity, pos_to, num_pos_to, pos_from, num_pos_from, lattice_points, num_lattice_points, reduced_basis, trans_mat, symprec); Py_RETURN_NONE; } static PyObject* py_gsv_set_smallest_vectors_dense(PyObject* self, PyObject* args) { PyArrayObject* py_smallest_vectors; PyArrayObject* py_multiplicity; PyArrayObject* py_pos_to; PyArrayObject* py_pos_from; PyArrayObject* py_lattice_points; PyArrayObject* py_reduced_basis; PyArrayObject* py_trans_mat; long initialize; double symprec; double(*smallest_vectors)[3]; long(*multiplicity)[2]; double(*pos_to)[3]; double(*pos_from)[3]; long(*lattice_points)[3]; double(*reduced_basis)[3]; long(*trans_mat)[3]; long num_pos_to, num_pos_from, num_lattice_points; if (!PyArg_ParseTuple(args, "OOOOOOOld", &py_smallest_vectors, &py_multiplicity, &py_pos_to, &py_pos_from, &py_lattice_points, &py_reduced_basis, &py_trans_mat, &initialize, &symprec)) { return NULL; } smallest_vectors 
= (double(*)[3])PyArray_DATA(py_smallest_vectors); multiplicity = (long(*)[2])PyArray_DATA(py_multiplicity); pos_to = (double(*)[3])PyArray_DATA(py_pos_to); pos_from = (double(*)[3])PyArray_DATA(py_pos_from); num_pos_to = PyArray_DIMS(py_pos_to)[0]; num_pos_from = PyArray_DIMS(py_pos_from)[0]; lattice_points = (long(*)[3])PyArray_DATA(py_lattice_points); num_lattice_points = PyArray_DIMS(py_lattice_points)[0]; reduced_basis = (double(*)[3])PyArray_DATA(py_reduced_basis); trans_mat = (long(*)[3])PyArray_DATA(py_trans_mat); phpy_set_smallest_vectors_dense( smallest_vectors, multiplicity, pos_to, num_pos_to, pos_from, num_pos_from, lattice_points, num_lattice_points, reduced_basis, trans_mat, initialize, symprec); Py_RETURN_NONE; } static PyObject* py_perm_trans_symmetrize_fc(PyObject* self, PyObject* args) { PyArrayObject* py_fc; double* fc; int level; int n_satom; if (!PyArg_ParseTuple(args, "Oi", &py_fc, &level)) { return NULL; } fc = (double*)PyArray_DATA(py_fc); n_satom = PyArray_DIMS(py_fc)[0]; phpy_perm_trans_symmetrize_fc(fc, n_satom, level); Py_RETURN_NONE; } static PyObject* py_perm_trans_symmetrize_compact_fc(PyObject* self, PyObject* args) { PyArrayObject* py_fc; PyArrayObject* py_permutations; PyArrayObject* py_s2pp_map; PyArrayObject* py_p2s_map; PyArrayObject* py_nsym_list; int level; double* fc; int* perms; int* s2pp; int* p2s; int* nsym_list; int n_patom, n_satom; if (!PyArg_ParseTuple(args, "OOOOOi", &py_fc, &py_permutations, &py_s2pp_map, &py_p2s_map, &py_nsym_list, &level)) { return NULL; } fc = (double*)PyArray_DATA(py_fc); perms = (int*)PyArray_DATA(py_permutations); s2pp = (int*)PyArray_DATA(py_s2pp_map); p2s = (int*)PyArray_DATA(py_p2s_map); nsym_list = (int*)PyArray_DATA(py_nsym_list); n_patom = PyArray_DIMS(py_fc)[0]; n_satom = PyArray_DIMS(py_fc)[1]; phpy_perm_trans_symmetrize_compact_fc(fc, p2s, s2pp, nsym_list, perms, n_satom, n_patom, level); Py_RETURN_NONE; } static PyObject* py_transpose_compact_fc(PyObject* self, PyObject* args) { 
PyArrayObject* py_fc; PyArrayObject* py_permutations; PyArrayObject* py_s2pp_map; PyArrayObject* py_p2s_map; PyArrayObject* py_nsym_list; double* fc; int* s2pp; int* p2s; int* nsym_list; int* perms; int n_patom, n_satom; if (!PyArg_ParseTuple(args, "OOOOO", &py_fc, &py_permutations, &py_s2pp_map, &py_p2s_map, &py_nsym_list)) { return NULL; } fc = (double*)PyArray_DATA(py_fc); perms = (int*)PyArray_DATA(py_permutations); s2pp = (int*)PyArray_DATA(py_s2pp_map); p2s = (int*)PyArray_DATA(py_p2s_map); nsym_list = (int*)PyArray_DATA(py_nsym_list); n_patom = PyArray_DIMS(py_fc)[0]; n_satom = PyArray_DIMS(py_fc)[1]; phpy_set_index_permutation_symmetry_compact_fc(fc, p2s, s2pp, nsym_list, perms, n_satom, n_patom, 1); Py_RETURN_NONE; } static PyObject* py_get_dynamical_matrix(PyObject* self, PyObject* args) { PyArrayObject* py_dynamical_matrix; PyArrayObject* py_force_constants; PyArrayObject* py_svecs; PyArrayObject* py_q; PyArrayObject* py_multi; PyArrayObject* py_masses; PyArrayObject* py_s2p_map; PyArrayObject* py_p2s_map; double* dm; double* fc; double* q; double(*svecs)[3]; double* m; long(*multi)[2]; long* s2p_map; long* p2s_map; long num_patom; long num_satom; if (!PyArg_ParseTuple(args, "OOOOOOOO", &py_dynamical_matrix, &py_force_constants, &py_q, &py_svecs, &py_multi, &py_masses, &py_s2p_map, &py_p2s_map)) { return NULL; } dm = (double*)PyArray_DATA(py_dynamical_matrix); fc = (double*)PyArray_DATA(py_force_constants); q = (double*)PyArray_DATA(py_q); svecs = (double(*)[3])PyArray_DATA(py_svecs); m = (double*)PyArray_DATA(py_masses); multi = (long(*)[2])PyArray_DATA(py_multi); s2p_map = (long*)PyArray_DATA(py_s2p_map); p2s_map = (long*)PyArray_DATA(py_p2s_map); num_patom = PyArray_DIMS(py_p2s_map)[0]; num_satom = PyArray_DIMS(py_s2p_map)[0]; phpy_get_dynamical_matrix_at_q(dm, num_patom, num_satom, fc, q, svecs, multi, m, s2p_map, p2s_map, NULL, 1); Py_RETURN_NONE; } static PyObject* py_get_nac_dynamical_matrix(PyObject* self, PyObject* args) { PyArrayObject* 
py_dynamical_matrix; PyArrayObject* py_force_constants; PyArrayObject* py_svecs; PyArrayObject* py_q_cart; PyArrayObject* py_q; PyArrayObject* py_multi; PyArrayObject* py_masses; PyArrayObject* py_s2p_map; PyArrayObject* py_p2s_map; PyArrayObject* py_born; double factor; double* dm; double* fc; double* q_cart; double* q; double(*svecs)[3]; double* m; double(*born)[3][3]; long(*multi)[2]; long* s2p_map; long* p2s_map; long num_patom; long num_satom; long n; double(*charge_sum)[3][3]; if (!PyArg_ParseTuple(args, "OOOOOOOOOOd", &py_dynamical_matrix, &py_force_constants, &py_q, &py_svecs, &py_multi, &py_masses, &py_s2p_map, &py_p2s_map, &py_q_cart, &py_born, &factor)) return NULL; dm = (double*)PyArray_DATA(py_dynamical_matrix); fc = (double*)PyArray_DATA(py_force_constants); q_cart = (double*)PyArray_DATA(py_q_cart); q = (double*)PyArray_DATA(py_q); svecs = (double(*)[3])PyArray_DATA(py_svecs); m = (double*)PyArray_DATA(py_masses); born = (double(*)[3][3])PyArray_DATA(py_born); multi = (long(*)[2])PyArray_DATA(py_multi); s2p_map = (long*)PyArray_DATA(py_s2p_map); p2s_map = (long*)PyArray_DATA(py_p2s_map); num_patom = PyArray_DIMS(py_p2s_map)[0]; num_satom = PyArray_DIMS(py_s2p_map)[0]; charge_sum = (double(*)[3][3])malloc(sizeof(double[3][3]) * num_patom * num_patom); n = num_satom / num_patom; phpy_get_charge_sum(charge_sum, num_patom, factor / n, q_cart, born); phpy_get_dynamical_matrix_at_q(dm, num_patom, num_satom, fc, q, svecs, multi, m, s2p_map, p2s_map, charge_sum, 1); free(charge_sum); Py_RETURN_NONE; } static PyObject* py_get_recip_dipole_dipole(PyObject* self, PyObject* args) { PyArrayObject* py_dd; PyArrayObject* py_dd_q0; PyArrayObject* py_G_list; PyArrayObject* py_q_cart; PyArrayObject* py_q_direction; PyArrayObject* py_born; PyArrayObject* py_dielectric; PyArrayObject* py_positions; double factor; double lambda; double tolerance; double* dd; double* dd_q0; double(*G_list)[3]; double* q_vector; double* q_direction; double(*born)[3][3]; 
double(*dielectric)[3]; double(*pos)[3]; long num_patom, num_G; if (!PyArg_ParseTuple(args, "OOOOOOOOddd", &py_dd, &py_dd_q0, &py_G_list, &py_q_cart, &py_q_direction, &py_born, &py_dielectric, &py_positions, &factor, &lambda, &tolerance)) return NULL; dd = (double*)PyArray_DATA(py_dd); dd_q0 = (double*)PyArray_DATA(py_dd_q0); G_list = (double(*)[3])PyArray_DATA(py_G_list); if ((PyObject*)py_q_direction == Py_None) { q_direction = NULL; } else { q_direction = (double*)PyArray_DATA(py_q_direction); } q_vector = (double*)PyArray_DATA(py_q_cart); born = (double(*)[3][3])PyArray_DATA(py_born); dielectric = (double(*)[3])PyArray_DATA(py_dielectric); pos = (double(*)[3])PyArray_DATA(py_positions); num_G = PyArray_DIMS(py_G_list)[0]; num_patom = PyArray_DIMS(py_positions)[0]; phpy_get_recip_dipole_dipole(dd, /* [natom, 3, natom, 3, (real, imag)] */ dd_q0, /* [natom, 3, 3, (real, imag)] */ G_list, /* [num_kvec, 3] */ num_G, num_patom, q_vector, q_direction, born, dielectric, pos, /* [natom, 3] */ factor, /* 4pi/V*unit-conv */ lambda, /* 4 * Lambda^2 */ tolerance); Py_RETURN_NONE; } static PyObject* py_get_recip_dipole_dipole_q0(PyObject* self, PyObject* args) { PyArrayObject* py_dd_q0; PyArrayObject* py_G_list; PyArrayObject* py_born; PyArrayObject* py_dielectric; PyArrayObject* py_positions; double lambda; double tolerance; double* dd_q0; double(*G_list)[3]; double(*born)[3][3]; double(*dielectric)[3]; double(*pos)[3]; long num_patom, num_G; if (!PyArg_ParseTuple(args, "OOOOOdd", &py_dd_q0, &py_G_list, &py_born, &py_dielectric, &py_positions, &lambda, &tolerance)) return NULL; dd_q0 = (double*)PyArray_DATA(py_dd_q0); G_list = (double(*)[3])PyArray_DATA(py_G_list); born = (double(*)[3][3])PyArray_DATA(py_born); dielectric = (double(*)[3])PyArray_DATA(py_dielectric); pos = (double(*)[3])PyArray_DATA(py_positions); num_G = PyArray_DIMS(py_G_list)[0]; num_patom = PyArray_DIMS(py_positions)[0]; phpy_get_recip_dipole_dipole_q0(dd_q0, /* [natom, 3, 3, (real, imag)] */ G_list, /* 
[num_kvec, 3] */ num_G, num_patom, born, dielectric, pos, /* [natom, 3] */ lambda, /* 4 * Lambda^2 */ tolerance); Py_RETURN_NONE; } static PyObject* py_get_derivative_dynmat(PyObject* self, PyObject* args) { PyArrayObject* py_derivative_dynmat; PyArrayObject* py_force_constants; PyArrayObject* py_svecs; PyArrayObject* py_lattice; PyArrayObject* py_q_vector; PyArrayObject* py_multi; PyArrayObject* py_masses; PyArrayObject* py_s2p_map; PyArrayObject* py_p2s_map; PyArrayObject* py_born; PyArrayObject* py_dielectric; PyArrayObject* py_q_direction; double nac_factor; double* ddm; double* fc; double* q_vector; double* lat; double(*svecs)[3]; double* masses; long(*multi)[2]; long* s2p_map; long* p2s_map; long num_patom; long num_satom; double* born; double* epsilon; double* q_dir; if (!PyArg_ParseTuple( args, "OOOOOOOOOdOOO", &py_derivative_dynmat, &py_force_constants, &py_q_vector, &py_lattice, /* column vectors */ &py_svecs, &py_multi, &py_masses, &py_s2p_map, &py_p2s_map, &nac_factor, &py_born, &py_dielectric, &py_q_direction)) { return NULL; } ddm = (double*)PyArray_DATA(py_derivative_dynmat); fc = (double*)PyArray_DATA(py_force_constants); q_vector = (double*)PyArray_DATA(py_q_vector); lat = (double*)PyArray_DATA(py_lattice); svecs = (double(*)[3])PyArray_DATA(py_svecs); masses = (double*)PyArray_DATA(py_masses); multi = (long(*)[2])PyArray_DATA(py_multi); s2p_map = (long*)PyArray_DATA(py_s2p_map); p2s_map = (long*)PyArray_DATA(py_p2s_map); num_patom = PyArray_DIMS(py_p2s_map)[0]; num_satom = PyArray_DIMS(py_s2p_map)[0]; if ((PyObject*)py_born == Py_None) { born = NULL; } else { born = (double*)PyArray_DATA(py_born); } if ((PyObject*)py_dielectric == Py_None) { epsilon = NULL; } else { epsilon = (double*)PyArray_DATA(py_dielectric); } if ((PyObject*)py_q_direction == Py_None) { q_dir = NULL; } else { q_dir = (double*)PyArray_DATA(py_q_direction); } phpy_get_derivative_dynmat_at_q(ddm, num_patom, num_satom, fc, q_vector, lat, svecs, multi, masses, s2p_map, p2s_map, 
nac_factor, born, epsilon, q_dir); Py_RETURN_NONE; } /* Thermal properties */ static PyObject* py_get_thermal_properties(PyObject* self, PyObject* args) { PyArrayObject* py_thermal_props; PyArrayObject* py_temperatures; PyArrayObject* py_frequencies; PyArrayObject* py_weights; double cutoff_frequency; double* temperatures; double* freqs; double* thermal_props; long* weights; long num_qpoints; long num_bands; long num_temp; if (!PyArg_ParseTuple(args, "OOOOd", &py_thermal_props, &py_temperatures, &py_frequencies, &py_weights, &cutoff_frequency)) { return NULL; } thermal_props = (double*)PyArray_DATA(py_thermal_props); temperatures = (double*)PyArray_DATA(py_temperatures); num_temp = (long)PyArray_DIMS(py_temperatures)[0]; freqs = (double*)PyArray_DATA(py_frequencies); num_qpoints = (long)PyArray_DIMS(py_frequencies)[0]; weights = (long*)PyArray_DATA(py_weights); num_bands = (long)PyArray_DIMS(py_frequencies)[1]; phpy_get_thermal_properties(thermal_props, temperatures, freqs, weights, num_temp, num_qpoints, num_bands, cutoff_frequency); Py_RETURN_NONE; } static PyObject* py_distribute_fc2(PyObject* self, PyObject* args) { PyArrayObject* py_force_constants; PyArrayObject* py_permutations; PyArrayObject* py_map_atoms; PyArrayObject* py_map_syms; PyArrayObject* py_atom_list; PyArrayObject* py_rotations_cart; double(*r_carts)[3][3]; double(*fc2)[3][3]; int* permutations; int* map_atoms; int* map_syms; int* atom_list; npy_intp num_pos, num_rot, len_atom_list; if (!PyArg_ParseTuple(args, "OOOOOO", &py_force_constants, &py_atom_list, &py_rotations_cart, &py_permutations, &py_map_atoms, &py_map_syms)) { return NULL; } fc2 = (double(*)[3][3])PyArray_DATA(py_force_constants); atom_list = (int*)PyArray_DATA(py_atom_list); len_atom_list = PyArray_DIMS(py_atom_list)[0]; permutations = (int*)PyArray_DATA(py_permutations); map_atoms = (int*)PyArray_DATA(py_map_atoms); map_syms = (int*)PyArray_DATA(py_map_syms); r_carts = (double(*)[3][3])PyArray_DATA(py_rotations_cart); num_rot = 
PyArray_DIMS(py_permutations)[0]; num_pos = PyArray_DIMS(py_permutations)[1]; if (PyArray_NDIM(py_map_atoms) != 1 || PyArray_DIMS(py_map_atoms)[0] != num_pos) { PyErr_SetString(PyExc_ValueError, "wrong shape for map_atoms"); return NULL; } if (PyArray_NDIM(py_map_syms) != 1 || PyArray_DIMS(py_map_syms)[0] != num_pos) { PyErr_SetString(PyExc_ValueError, "wrong shape for map_syms"); return NULL; } if (PyArray_DIMS(py_rotations_cart)[0] != num_rot) { PyErr_SetString(PyExc_ValueError, "permutations and rotations are different length"); return NULL; } phpy_distribute_fc2(fc2, atom_list, len_atom_list, r_carts, permutations, map_atoms, map_syms, num_rot, num_pos); Py_RETURN_NONE; } static PyObject* py_thm_relative_grid_address(PyObject* self, PyObject* args) { PyArrayObject* py_relative_grid_address; PyArrayObject* py_reciprocal_lattice_py; long(*relative_grid_address)[4][3]; double(*reciprocal_lattice)[3]; if (!PyArg_ParseTuple(args, "OO", &py_relative_grid_address, &py_reciprocal_lattice_py)) { return NULL; } relative_grid_address = (long(*)[4][3])PyArray_DATA(py_relative_grid_address); reciprocal_lattice = (double(*)[3])PyArray_DATA(py_reciprocal_lattice_py); phpy_get_relative_grid_address(relative_grid_address, reciprocal_lattice); Py_RETURN_NONE; } static PyObject* py_thm_all_relative_grid_address(PyObject* self, PyObject* args) { PyArrayObject* py_relative_grid_address; long(*relative_grid_address)[24][4][3]; if (!PyArg_ParseTuple(args, "O", &py_relative_grid_address)) { return NULL; } relative_grid_address = (long(*)[24][4][3])PyArray_DATA(py_relative_grid_address); phpy_get_all_relative_grid_address(relative_grid_address); Py_RETURN_NONE; } static PyObject* py_thm_integration_weight(PyObject* self, PyObject* args) { double omega; PyArrayObject* py_tetrahedra_omegas; char* function; double(*tetrahedra_omegas)[4]; double iw; if (!PyArg_ParseTuple(args, "dOs", &omega, &py_tetrahedra_omegas, &function)) { return NULL; } tetrahedra_omegas = 
(double(*)[4])PyArray_DATA(py_tetrahedra_omegas); iw = phpy_get_integration_weight(omega, tetrahedra_omegas, function[0]); return PyFloat_FromDouble(iw); } static PyObject* py_thm_integration_weight_at_omegas(PyObject* self, PyObject* args) { PyArrayObject* py_integration_weights; PyArrayObject* py_omegas; PyArrayObject* py_tetrahedra_omegas; char* function; double* omegas; double* iw; long num_omegas; double(*tetrahedra_omegas)[4]; long i; if (!PyArg_ParseTuple(args, "OOOs", &py_integration_weights, &py_omegas, &py_tetrahedra_omegas, &function)) { return NULL; } omegas = (double*)PyArray_DATA(py_omegas); iw = (double*)PyArray_DATA(py_integration_weights); num_omegas = (long)PyArray_DIMS(py_omegas)[0]; tetrahedra_omegas = (double(*)[4])PyArray_DATA(py_tetrahedra_omegas); #pragma omp parallel for for (i = 0; i < num_omegas; i++) { iw[i] = phpy_get_integration_weight(omegas[i], tetrahedra_omegas, function[0]); } Py_RETURN_NONE; } static PyObject* py_get_tetrahedra_frequenies(PyObject* self, PyObject* args) { PyArrayObject* py_freq_tetras; PyArrayObject* py_grid_points; PyArrayObject* py_mesh; PyArrayObject* py_grid_address; PyArrayObject* py_gp_ir_index; PyArrayObject* py_relative_grid_address; PyArrayObject* py_frequencies; double* freq_tetras; long* grid_points; long* mesh; long(*grid_address)[3]; long* gp_ir_index; long(*relative_grid_address)[3]; double* frequencies; long num_gp_in, num_band; if (!PyArg_ParseTuple(args, "OOOOOOO", &py_freq_tetras, &py_grid_points, &py_mesh, &py_grid_address, &py_gp_ir_index, &py_relative_grid_address, &py_frequencies)) { return NULL; } freq_tetras = (double*)PyArray_DATA(py_freq_tetras); grid_points = (long*)PyArray_DATA(py_grid_points); num_gp_in = PyArray_DIMS(py_grid_points)[0]; mesh = (long*)PyArray_DATA(py_mesh); grid_address = (long(*)[3])PyArray_DATA(py_grid_address); gp_ir_index = (long*)PyArray_DATA(py_gp_ir_index); relative_grid_address = (long(*)[3])PyArray_DATA(py_relative_grid_address); frequencies = 
(double*)PyArray_DATA(py_frequencies); num_band = PyArray_DIMS(py_frequencies)[1]; phpy_get_tetrahedra_frequenies(freq_tetras, mesh, grid_points, grid_address, relative_grid_address, gp_ir_index, frequencies, num_band, num_gp_in); Py_RETURN_NONE; } static PyObject* py_tetrahedron_method_dos(PyObject* self, PyObject* args) { PyArrayObject* py_dos; PyArrayObject* py_mesh; PyArrayObject* py_freq_points; PyArrayObject* py_frequencies; PyArrayObject* py_coef; PyArrayObject* py_grid_address; PyArrayObject* py_grid_mapping_table; PyArrayObject* py_relative_grid_address; double* dos; long* mesh; double* freq_points; double* frequencies; double* coef; long(*grid_address)[3]; long num_gp, num_ir_gp, num_band, num_freq_points, num_coef; long* grid_mapping_table; long(*relative_grid_address)[4][3]; if (!PyArg_ParseTuple(args, "OOOOOOOO", &py_dos, &py_mesh, &py_freq_points, &py_frequencies, &py_coef, &py_grid_address, &py_grid_mapping_table, &py_relative_grid_address)) { return NULL; } /* dos[num_ir_gp][num_band][num_freq_points][num_coef] */ dos = (double*)PyArray_DATA(py_dos); mesh = (long*)PyArray_DATA(py_mesh); freq_points = (double*)PyArray_DATA(py_freq_points); num_freq_points = (long)PyArray_DIMS(py_freq_points)[0]; frequencies = (double*)PyArray_DATA(py_frequencies); num_ir_gp = (long)PyArray_DIMS(py_frequencies)[0]; num_band = (long)PyArray_DIMS(py_frequencies)[1]; coef = (double*)PyArray_DATA(py_coef); num_coef = (long)PyArray_DIMS(py_coef)[1]; grid_address = (long(*)[3])PyArray_DATA(py_grid_address); num_gp = (long)PyArray_DIMS(py_grid_address)[0]; grid_mapping_table = (long*)PyArray_DATA(py_grid_mapping_table); relative_grid_address = (long(*)[4][3])PyArray_DATA(py_relative_grid_address); phpy_tetrahedron_method_dos(dos, mesh, grid_address, relative_grid_address, grid_mapping_table, freq_points, frequencies, coef, num_freq_points, num_ir_gp, num_band, num_coef, num_gp); Py_RETURN_NONE; }
/* ==== file: GB_unaryop__lnot_fp32_int16.c ==== */
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): generated kernel — changes belong in the Generator/ sources.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_fp32_int16
// op(A') function:  GB_tran__lnot_fp32_int16

// C type:   float
// A type:   int16_t
// cast:     float cij = (float) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (logical NOT of the truth value of x)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting (int16_t source is first cast to the float output type)
#define GB_CASTING(z, x) \
    float z = (float) x ;

// cij = op (cast (aij)); also used by the transpose template below
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_FP32 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator, elementwise over anz entries
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_fp32_int16
(
    float *restrict Cx,
    const int16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The entire body is the included template, driven by the macros above.

GrB_Info GB_tran__lnot_fp32_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__identity_bool_int8.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): generated kernel — changes belong in the Generator/ sources.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_bool_int8)
// op(A') function:  GB (_unop_tran__identity_bool_int8)

// C type:   bool
// A type:   int8_t
// cast:     bool cij = (bool) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity)
#define GB_OP(z, x) \
    z = x ;

// casting (any nonzero int8_t becomes true)
#define GB_CAST(z, aij) \
    bool z = (bool) aij ;

// cij = op (aij); also used by the transpose template below
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int8_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    bool z = (bool) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_bool_int8)
(
    bool *Cx,                   // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is sparse, hypersparse, or full: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int8_t aij = Ax [p] ;
            bool z = (bool) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries not in the bitmap
            int8_t aij = Ax [p] ;
            bool z = (bool) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The entire body is the included template, driven by the macros above.

GrB_Info GB (_unop_tran__identity_bool_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__max_int64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): generated kernel — changes belong in the Generator/ sources.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B  function (eWiseAdd):     GB (_AaddB__max_int64)
// A.*B function (eWiseMult):    GB (_AemultB_08__max_int64)
// A.*B function (eWiseMult):    GB (_AemultB_02__max_int64)
// A.*B function (eWiseMult):    GB (_AemultB_04__max_int64)
// A.*B function (eWiseMult):    GB (_AemultB_bitmap__max_int64)
// A*D  function (colscale):     GB (_AxD__max_int64)
// D*A  function (rowscale):     GB (_DxB__max_int64)
// C+=B function (dense accum):  GB (_Cdense_accumB__max_int64)
// C+=b function (dense accum):  GB (_Cdense_accumb__max_int64)
// C+=A+B function (dense ewise3):    GB (_Cdense_ewise3_accum__max_int64)
// C=A+B function (dense ewise3):     GB (_Cdense_ewise3_noaccum__max_int64)
// C=scalar+B                    GB (_bind1st__max_int64)
// C=scalar+B'                   GB (_bind1st_tran__max_int64)
// C=A+scalar                    GB (_bind2nd__max_int64)
// C=A'+scalar                   GB (_bind2nd_tran__max_int64)

// C type:     int64_t
// A type:     int64_t
// A pattern?  0
// B type:     int64_t
// B pattern?  0

// BinaryOp:   cij = GB_IMAX (aij, bij)

#define GB_ATYPE \
    int64_t

#define GB_BTYPE \
    int64_t

#define GB_CTYPE \
    int64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]  (GBX handles iso-valued matrices)
#define GB_GETA(aij,Ax,pA,A_iso) \
    int64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]  (GBX handles iso-valued matrices)
#define GB_GETB(bij,Bx,pB,B_iso) \
    int64_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator (signed 64-bit max)
#define GB_BINOP(z,x,y,i,j) \
    z = GB_IMAX (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MAX || GxB_NO_INT64 || GxB_NO_MAX_INT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__max_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // body is the included template, driven by the macros defined above
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__max_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__max_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__max_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns first
    // (kept as generated).
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__max_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__max_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__max_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace declared here; freed by GB_FREE_WORKSPACE after the template
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read for eWiseUnion (fill values for A and B)
    int64_t alpha_scalar ;
    int64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__max_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__max_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (MAX is commutative, so this branch is the one compiled here.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__max_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__max_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__max_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t  x  = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;    // skip entries not in the bitmap
        int64_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_IMAX (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__max_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t  y  = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;    // skip entries not in the bitmap
        int64_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_IMAX (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_IMAX (x, aij) ; \
}

GrB_Info GB (_bind1st_tran__max_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_IMAX (aij, y) ; \
}

GrB_Info GB (_bind2nd_tran__max_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GrB_Vector_wait.c
//------------------------------------------------------------------------------ // GrB_Vector_wait: wait for a vector to complete //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Finishes all work on a vector, followed by an OpenMP flush. #include "GB.h" #define GB_FREE_ALL ; GrB_Info GrB_Vector_wait // finish all work on a vector ( GrB_Vector *v ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- #pragma omp flush GB_WHERE ((*v), "GrB_Vector_wait (&v)") ; GB_RETURN_IF_NULL (v) ; GB_RETURN_IF_NULL_OR_FAULTY (*v) ; //-------------------------------------------------------------------------- // finish all pending work on the vector //-------------------------------------------------------------------------- if (GB_ANY_PENDING_WORK (*v)) { GrB_Info info ; GB_BURBLE_START ("GrB_Vector_wait") ; GB_OK (GB_wait ((GrB_Matrix) (*v), "vector", Context)) ; GB_BURBLE_END ; } //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- #pragma omp flush return (GrB_SUCCESS) ; }
elu_kernel_arm.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: haitao@openailab.com
 */

#include "elu_kernel_arm.h"

#include "neon_mathfun.h"

#include <math.h>

#include <arm_neon.h>

/* ELU over one contiguous slice of `elem_num` floats:
 * out = in                     if in >  0
 * out = alpha * (exp(in) - 1)  if in <= 0
 * `data` points at an int holding elem_num; `id` selects the slice.
 * NOTE(review): parameter `i` is never read and is shadowed by both loop
 * counters below — it appears to be a leftover from an older thread-pool
 * callback signature.
 * NOTE(review): the scalar tail uses double-precision exp() while the
 * vector path uses the NEON exp_ps() approximation, so the last
 * (elem_num % 4) elements may differ from the vector lanes in the final
 * ulps — presumably acceptable for inference; confirm if bit-exactness
 * across the slice matters. */
static void elu_kernel(int i, int id, void* data, const float* input, float* output, float alpha)
{
    int elem_num = ((int*)data)[0];
    float32x4_t _one = vdupq_n_f32(1.f);
    float32x4_t _zero = vdupq_n_f32(0.f);
    float32x4_t _alpha = vdupq_n_f32(alpha);

    const float* cur_input = input + id * elem_num;
    float* cur_output = output + id * elem_num;
    /* Vector body: 4 floats per iteration; (elem_num & -4) rounds the
     * trip count down to a multiple of 4. */
    for (int i = 0; i < (elem_num & -4); i += 4)
    {
        float32x4_t _p = vld1q_f32(cur_input);
        /* lane mask: input <= 0 selects the ELU branch */
        uint32x4_t _lemask = vcleq_f32(_p, _zero);
        float32x4_t _nps = exp_ps(_p);
        _nps = vsubq_f32(_nps, _one);
        _nps = vmulq_f32(_nps, _alpha);
        /* blend: alpha*(exp(x)-1) where x<=0, else x unchanged */
        _p = vbslq_f32(_lemask, _nps, _p);
        vst1q_f32(cur_output, _p);
        cur_input += 4;
        cur_output += 4;
    }
    /* Scalar tail for the remaining 0..3 elements. */
    for (int i = elem_num & ~3; i < elem_num; i++)
    {
        if (*cur_input < 0.f)
            *cur_output = (exp(*cur_input) - 1.f) * alpha;
        else
            *cur_output = *cur_input;
        cur_input++;
        cur_output++;
    }
}

/* Apply ELU to a whole tensor, one (batch, channel) slice per OpenMP task.
 * Assumes 4-D dims: dims[0]*dims[1] independent channels of
 * dims[2]*dims[3] elements each.  Always returns 0. */
int elu_run(struct tensor* output_tensor, struct tensor* input_tensor, struct elu_param* elu_param, int num_thread)
{
    float* data = (float*)input_tensor->data;
    float* out_data = (float*)output_tensor->data;
    float alpha = elu_param->alpha;
    int chan_num = (input_tensor->dims[0]) * (input_tensor->dims[1]);
    int chan_size = (input_tensor->dims[2]) * (input_tensor->dims[3]);

#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < chan_num; i++)
    {
        /* id=0 with an explicit base-pointer offset: each task passes its
         * own slice start, so elu_kernel's internal id*elem_num is 0. */
        int offset = i * chan_size;
        elu_kernel(0, 0, &chan_size, data + offset, out_data + offset, alpha);
    }

    return 0;
}
Helm3d_27pt.h
#include "math.h" #include <complex.h> #ifdef SINGLE_PRECISION #define numeric float; #else #define numeric double; #endif typedef double _Complex cmplx_type; #ifdef WNCMPLX typedef cmplx_type wn_type; #else typedef double wn_type; #endif #define PI M_PI #define CMPLX(x, y) ((double complex)((double)(x) + _Complex_I * (double)(y))) #define IDX1D(a,b,m,n) ((a) + (b)*(m)) #define IDX1D3(a,b,c,m,n,p) ((a) + (b)*(m) + (c)*(m)*(n)) #define IDX1D4(a,b,c,d,m,n,p) ((a) + (b)*(m) + (c)*(m)*(n) + (d)*(m)*(n)*(p)) /* void print_c(const double complex c) { mexPrintf("%3.15e + i%3.15e ",creal(c),cimag(c)); } void print_nbrhdc(const double complex * c) { for(int k=0; k<3; k++) { for(int j=0; j<3; j++) { for(int i=0; i<3; i++) { print_c(c[IDX1D3(i,j,k,3,3,3)]); mexPrintf("\n"); } mexPrintf("\n"); } mexPrintf("\n\n"); } } void print_nbrhd(const double * c) { for(int k=0; k<3; k++) { for(int j=0; j<3; j++) { for(int i=0; i<3; i++) { mexPrintf("%3.15e ",c[IDX1D3(i,j,k,3,3,3)]); mexPrintf("\n"); } mexPrintf("\n"); } mexPrintf("\n\n"); } } */ #define xyzloop_updown(...) 
for(k=0; k<nz; k++) \ { \ pmlzfunc(&p,&padj,k,nz,npmlz_lo,npmlz_hi,pmlz_alloc); \ pmlz_alloc = 1; \ p.z_hasL = k>0; p.z_hasR = k < nz-1; \ for(j=0; j<ny; j++) \ { \ p.y_hasL = j > 0; p.y_hasR = j < ny-1; \ pmlyfunc(&p,&padj,j,ny,npmly_lo,npmly_hi,pmly_alloc); \ pmly_alloc = 1; \ for(i=0; i<nx; i++) \ { \ p.x_hasL = i > 0; p.x_hasR = i < nx-1; \ pmlxfunc(&p,&padj,i,nx,npmlx_lo,npmlx_hi,pmlx_alloc,true); \ pmlx_alloc = 1; \ __VA_ARGS__ \ } \ pmlx_alloc = 0; \ } \ pmly_alloc = 0; \ } \ pmlz_alloc = 0; \ for(k=nz-1; k>=0; k--) \ { \ pmlzfunc(&p,&padj,k,nz,npmlz_lo,npmlz_hi,pmlz_alloc); \ pmlz_alloc = 1; \ p.z_hasL = k>0; p.z_hasR = k < nz-1; \ for(j=ny-1; j>=0; j--) \ { \ p.y_hasL = j > 0; p.y_hasR = j < ny-1; \ pmlyfunc(&p,&padj,j,ny,npmly_lo,npmly_hi,pmly_alloc); \ pmly_alloc = 1; \ for(i=nx-1; i>=0; i--) \ { \ p.x_hasL = i > 0; p.x_hasR = i < nx-1; \ pmlxfunc(&p,&padj,i,nx,npmlx_lo,npmlx_hi,pmlx_alloc,false); \ pmlx_alloc = 1; \ __VA_ARGS__ \ } \ pmlx_alloc = 0; \ } \ pmly_alloc = 0; \ } \ pmlz_alloc = 0; #define xyzloop_up(...) for(k=0; k<nz; k++) \ { \ pmlzfunc(&p,&padj,k,nz,npmlz_lo,npmlz_hi,pmlz_alloc); \ pmlz_alloc = 1; \ p.z_hasL = k > 0; p.z_hasR = k < nz-1; \ for(j=0; j<ny; j++) \ { \ p.y_hasL = j > 0; p.y_hasR = j < ny-1; \ pmlyfunc(&p,&padj,j,ny,npmly_lo,npmly_hi,pmly_alloc); \ pmly_alloc = 1; \ for(i=0; i<nx; i++) \ { \ p.x_hasL = i > 0; p.x_hasR = i < nx-1; \ pmlxfunc(&p,&padj,i,nx,npmlx_lo,npmlx_hi,pmlx_alloc,1); \ pmlx_alloc = 1; \ __VA_ARGS__ \ } \ pmlx_alloc = 0; \ } \ pmly_alloc = 0; \ } #define xyzloop_down(...) 
for(k=nz-1; k>=0; k--) \ { \ pmlzfunc(&p,&padj,k,nz,npmlz_lo,npmlz_hi,pmlz_alloc); \ pmlz_alloc = 1; \ p.z_hasL = k > 0; p.z_hasR = k < nz-1; \ for(j=ny-1; j>=0; j--) \ { \ pmlyfunc(&p,&padj,j,ny,npmly_lo,npmly_hi,pmly_alloc); \ p.y_hasL = j > 0; p.y_hasR = j < ny-1; \ pmly_alloc = 1; \ for(i=nx-1; i>=0; i--) \ { \ p.x_hasL = i > 0; p.x_hasR = i < nx-1; \ pmlxfunc(&p,&padj,i,nx,npmlx_lo,npmlx_hi,pmlx_alloc,0); \ pmlx_alloc = 1; \ __VA_ARGS__ \ } \ pmlx_alloc = 0; \ } \ pmly_alloc = 0; \ } /* The current coordinate in each direction is denoted as N +1/-1 in the corresponding coordinate is denoted as P/M, respectively So, e.g., NNN - current point (x,y,z) MNN - (x-1,y,z) PNN - (x+1,y,z) MNP - (x-1,y,z+1) PMP - (x+1,y-1,z+1) etc. */ int const MMM = 0; int const NMM = 1; int const PMM = 2; int const MNM = 3; int const NNM = 4; int const PNM = 5; int const MPM = 6; int const NPM = 7; int const PPM = 8; int const MMN = 9; int const NMN = 10; int const PMN = 11; int const MNN = 12; int const NNN = 13; int const PNN = 14; int const MPN = 15; int const NPN = 16; int const PPN = 17; int const MMP = 18; int const NMP = 19; int const PMP = 20; int const MNP = 21; int const NNP = 22; int const PNP = 23; int const MPP = 24; int const NPP = 25; int const PPP = 26; /* Absolute constants related to the stencil */ double const W1 = (1.8395262e-5); double const W2 = (0.296692333333333); double const W3 = (0.027476150000000); double const WM1 = (0.49649658); double const WM2 = (0.075168750000000); double const WM3 = (0.004373916666667); double const WM4 = (5.690375e-07); #ifdef DERIV double const non_deriv_mode = 0; #else double const non_deriv_mode = 1; #endif /* Run time constants related to the stencil */ struct _coef_consts { const double wn_coef; const double wn_xcoef; const double wn_ycoef; const double wn_zcoef; const double pmlx_coef; const double pmly_coef; const double pmlz_coef; const double xz_coef; const double xy_coef; const double yz_coef; const double W3A_2; }; typedef 
struct _coef_consts coef_consts; /* Structs for holding information of pml function values in each direction */ struct _pml_info { double complex pmlx_lo; double complex pmlx_hi; double complex pmlx; double complex pmly_lo; double complex pmly_hi; double complex pmly; double complex pmlz_lo; double complex pmlz_hi; double complex pmlz; int x_hasL; int x_hasR; int y_hasL; int y_hasR; int z_hasL; int z_hasR; }; typedef struct _pml_info pml_info; struct _pml_adj_info { double complex pmlx_lo_window[3]; double complex pmlx_hi_window[3]; double complex pmly_lo_window[3]; double complex pmly_hi_window[3]; double complex pmlz_lo_window[3]; double complex pmlz_hi_window[3]; }; typedef struct _pml_adj_info pml_adj_info; #define MMM_BDRY(...) __builtin_expect(p.x_hasL && p.y_hasL && p.z_hasL,1) ? ( __VA_ARGS__ ) : 0 #define NMM_BDRY(...) __builtin_expect(p.y_hasL && p.z_hasL,1) ? ( __VA_ARGS__ ) : 0 #define PMM_BDRY(...) __builtin_expect(p.x_hasR && p.y_hasL && p.z_hasL,1) ? ( __VA_ARGS__ ) : 0 #define MNM_BDRY(...) __builtin_expect(p.x_hasL && p.z_hasL,1) ? ( __VA_ARGS__ ) : 0 #define NNM_BDRY(...) __builtin_expect(p.z_hasL,1) ? ( __VA_ARGS__) : 0 #define PNM_BDRY(...) __builtin_expect(p.x_hasR && p.z_hasL,1) ? ( __VA_ARGS__) : 0 #define MPM_BDRY(...) __builtin_expect(p.x_hasL && p.y_hasR && p.z_hasL,1) ? ( __VA_ARGS__) : 0 #define NPM_BDRY(...) __builtin_expect(p.y_hasR && p.z_hasL,1) ? ( __VA_ARGS__) : 0 #define PPM_BDRY(...) __builtin_expect(p.x_hasR && p.y_hasR && p.z_hasL,1) ? ( __VA_ARGS__) : 0 #define MMN_BDRY(...) __builtin_expect(p.x_hasL && p.y_hasL,1) ? ( __VA_ARGS__) : 0 #define NMN_BDRY(...) __builtin_expect(p.y_hasL,1) ? ( __VA_ARGS__) : 0 #define PMN_BDRY(...) __builtin_expect(p.x_hasR && p.y_hasL,1) ? ( __VA_ARGS__) : 0 #define MNN_BDRY(...) __builtin_expect(p.x_hasL,1) ? ( __VA_ARGS__) : 0 #define PNN_BDRY(...) __builtin_expect(p.x_hasR,1) ? ( __VA_ARGS__) : 0 #define MPN_BDRY(...) __builtin_expect(p.x_hasL && p.y_hasR,1) ? 
( __VA_ARGS__) : 0 #define NPN_BDRY(...) __builtin_expect(p.y_hasR,1) ? ( __VA_ARGS__) : 0 #define PPN_BDRY(...) __builtin_expect(p.x_hasR && p.y_hasR,1) ? ( __VA_ARGS__) : 0 #define MMP_BDRY(...) __builtin_expect(p.x_hasL && p.y_hasL && p.z_hasR,1) ? ( __VA_ARGS__) : 0 #define NMP_BDRY(...) __builtin_expect(p.y_hasL && p.z_hasR,1) ? ( __VA_ARGS__) : 0 #define PMP_BDRY(...) __builtin_expect(p.x_hasR && p.y_hasL && p.z_hasR,1) ? ( __VA_ARGS__) : 0 #define MNP_BDRY(...) __builtin_expect(p.x_hasL && p.z_hasR,1) ? ( __VA_ARGS__) : 0 #define NNP_BDRY(...) __builtin_expect(p.z_hasR,1) ? ( __VA_ARGS__) : 0 #define PNP_BDRY(...) __builtin_expect(p.x_hasR && p.z_hasR,1) ? ( __VA_ARGS__) : 0 #define MPP_BDRY(...) __builtin_expect(p.x_hasL && p.y_hasR && p.z_hasR,1) ? ( __VA_ARGS__) : 0 #define NPP_BDRY(...) __builtin_expect(p.y_hasR && p.z_hasR,1) ? ( __VA_ARGS__) : 0 #define PPP_BDRY(...) __builtin_expect(p.x_hasR && p.y_hasR && p.z_hasR,1) ? ( __VA_ARGS__) : 0 #define MMM_IF(...) if(__builtin_expect(p.x_hasL && p.y_hasL && p.z_hasL,1)) { __VA_ARGS__ } #define NMM_IF(...) if(__builtin_expect(p.y_hasL && p.z_hasL,1)){ __VA_ARGS__ } #define PMM_IF(...) if(__builtin_expect(p.x_hasR && p.y_hasL && p.z_hasL,1)){ __VA_ARGS__ } #define MNM_IF(...) if(__builtin_expect(p.x_hasL && p.z_hasL,1)){ __VA_ARGS__ } #define NNM_IF(...) if(__builtin_expect(p.z_hasL,1)) { __VA_ARGS__ } #define PNM_IF(...) if(__builtin_expect(p.x_hasR && p.z_hasL,1)){ __VA_ARGS__ } #define MPM_IF(...) if(__builtin_expect(p.x_hasL && p.y_hasR && p.z_hasL,1)){ __VA_ARGS__ } #define NPM_IF(...) if(__builtin_expect(p.y_hasR && p.z_hasL,1)) { __VA_ARGS__ } #define PPM_IF(...) if(__builtin_expect(p.x_hasR && p.y_hasR && p.z_hasL,1)) { __VA_ARGS__ } #define MMN_IF(...) if(__builtin_expect(p.x_hasL && p.y_hasL,1)){ __VA_ARGS__ } #define NMN_IF(...) if(__builtin_expect(p.y_hasL,1)){ __VA_ARGS__ } #define PMN_IF(...) if(__builtin_expect(p.x_hasR && p.y_hasL,1)){ __VA_ARGS__ } #define MNN_IF(...) 
if(__builtin_expect(p.x_hasL,1)){ __VA_ARGS__ } #define PNN_IF(...) if(__builtin_expect(p.x_hasR,1)){ __VA_ARGS__ } #define MPN_IF(...) if(__builtin_expect(p.x_hasL && p.y_hasR,1)){ __VA_ARGS__ } #define NPN_IF(...) if(__builtin_expect(p.y_hasR,1)){ __VA_ARGS__ } #define PPN_IF(...) if(__builtin_expect(p.x_hasR && p.y_hasR,1)){ __VA_ARGS__ } #define MMP_IF(...) if(__builtin_expect(p.x_hasL && p.y_hasL && p.z_hasR,1)){ __VA_ARGS__ } #define NMP_IF(...) if(__builtin_expect(p.y_hasL && p.z_hasR,1)){ __VA_ARGS__ } #define PMP_IF(...) if(__builtin_expect(p.x_hasR && p.y_hasL && p.z_hasR,1)){ __VA_ARGS__ } #define MNP_IF(...) if(__builtin_expect(p.x_hasL && p.z_hasR,1)){ __VA_ARGS__ } #define NNP_IF(...) if(__builtin_expect(p.z_hasR,1)){ __VA_ARGS__ } #define PNP_IF(...) if(__builtin_expect(p.x_hasR && p.z_hasR,1)){ __VA_ARGS__ } #define MPP_IF(...) if(__builtin_expect(p.x_hasL && p.y_hasR && p.z_hasR,1)){ __VA_ARGS__ } #define NPP_IF(...) if(__builtin_expect(p.y_hasR && p.z_hasR,1)){ __VA_ARGS__ } #define PPP_IF(...) if(__builtin_expect(p.x_hasR && p.y_hasR && p.z_hasR,1)){ __VA_ARGS__ } /******************************** PML related functions ********************************/ inline double gamma_func_lower( double x, double nx, double npml_bot, double npml_top ) { return cos(PI*((x-1)* nx/(2*(nx+1)*npml_bot))); } inline double gamma_func_upper( double x, double nx, double npml_bot, double npml_top ) { return cos(PI*((1-(x-1)/(nx+1)) * nx/(2*npml_top))); } /* There are two functions associated to the pml, denoted pml_func_lower and pml_func_upper x - in the range [1,nx] nx - length of domain, including pml npml_bot - number of pml pts on the bottom of the domain npml_top - number of pml pts on the top of the domain */ inline double complex pml_func_lower( double x, double nx, double npml_bot, double npml_top ) { double gamma, gammap1; if (x <= npml_bot) { gammap1 = (x==npml_bot) ? 
0 : gamma_func_lower(x+1,nx,npml_bot,npml_top); gamma = gamma_func_lower(x,nx,npml_bot,npml_top); } else{ if( (x > npml_bot) && (x <= nx+2-npml_bot) ) { gamma = 0; gammap1 = (x==nx+2-npml_bot) ? gamma_func_upper(x+1,nx,npml_bot,npml_top) : 0; } else { gamma = gamma_func_upper(x,nx,npml_bot,npml_top); gammap1 = gamma_func_upper(x+1,nx,npml_bot,npml_top); } } return (2.0 + 0.0*I)/( (2.0 - gammap1*(gammap1+gamma)) + (3.0*gammap1+gamma)*I); } inline double complex pml_func_upper( double x, double nx, double npml_bot, double npml_top ) { double gammap2, gammap1; if (x <= npml_bot) { gammap1 = (x==npml_bot) ? 0 : gamma_func_lower(x+1,nx,npml_bot,npml_top); gammap2 = (x >= npml_bot-1) ? 0 : gamma_func_lower(x+2,nx,npml_bot,npml_top); } else{ if( (x > npml_bot) && (x <= nx+2-npml_bot) ) { gammap2 = (x>=nx+1-npml_bot) ? gamma_func_upper(x+2,nx,npml_bot,npml_top) : 0; gammap1 = (x==nx+2-npml_bot) ? gamma_func_upper(x+1,nx,npml_bot,npml_top) : 0; } else { gammap2 = gamma_func_upper(x+2,nx,npml_bot,npml_top); gammap1 = gamma_func_upper(x+1,nx,npml_bot,npml_top); } } return (2.0 + 0.0*I)/( (2.0 - gammap1*(gammap1+gammap2)) + (3.0*gammap1+gammap2)*I); } inline void pmlzfunc(pml_info * p, pml_adj_info * padj, int k, int nz, int npmlz_lo, int npmlz_hi, int pmlz_alloc) { #ifdef ADJ padj->pmlz_lo_window[0] = pml_func_lower(k,nz,npmlz_lo,npmlz_hi); padj->pmlz_hi_window[0] = pml_func_upper(k,nz,npmlz_lo,npmlz_hi); padj->pmlz_lo_window[1] = pml_func_lower(k+1,nz,npmlz_lo,npmlz_hi); padj->pmlz_hi_window[1] = pml_func_upper(k+1,nz,npmlz_lo,npmlz_hi); padj->pmlz_lo_window[2] = pml_func_lower(k+2,nz,npmlz_lo,npmlz_hi); padj->pmlz_hi_window[2] = pml_func_upper(k+2,nz,npmlz_lo,npmlz_hi); p->pmlz = padj->pmlz_lo_window[1] + padj->pmlz_hi_window[1]; #else p->pmlz_lo = pml_func_lower(k+1,nz,npmlz_lo, npmlz_hi); p->pmlz_hi = pml_func_upper(k+1,nz,npmlz_lo, npmlz_hi); p->pmlz = p->pmlz_lo + p->pmlz_hi; #endif } inline void pmlyfunc(pml_info * p, pml_adj_info * padj, int j, int ny, int npmly_lo, 
int npmly_hi, int pmly_alloc) { #ifdef ADJ padj->pmly_lo_window[0] = pml_func_lower(j,ny,npmly_lo,npmly_hi); padj->pmly_hi_window[0] = pml_func_upper(j,ny,npmly_lo,npmly_hi); padj->pmly_lo_window[1] = pml_func_lower(j+1,ny,npmly_lo,npmly_hi); padj->pmly_hi_window[1] = pml_func_upper(j+1,ny,npmly_lo,npmly_hi); padj->pmly_lo_window[2] = pml_func_lower(j+2,ny,npmly_lo,npmly_hi); padj->pmly_hi_window[2] = pml_func_upper(j+2,ny,npmly_lo,npmly_hi); p->pmly = padj->pmly_lo_window[1] + padj->pmly_hi_window[1]; #else p->pmly_lo = pml_func_lower(j+1,ny,npmly_lo, npmly_hi); p->pmly_hi = pml_func_upper(j+1,ny,npmly_lo, npmly_hi); p->pmly = p->pmly_lo + p->pmly_hi; #endif } inline void pmlxfunc(pml_info * p, pml_adj_info * padj, int i, int nx, int npmlx_lo, int npmlx_hi, int pmlx_alloc, int i_incr) { #ifdef ADJ if(!pmlx_alloc) { padj->pmlx_lo_window[0] = pml_func_lower(i,nx,npmlx_lo,npmlx_hi); padj->pmlx_hi_window[0] = pml_func_upper(i,nx,npmlx_lo,npmlx_hi); padj->pmlx_lo_window[1] = pml_func_lower(i+1,nx,npmlx_lo,npmlx_hi); padj->pmlx_hi_window[1] = pml_func_upper(i+1,nx,npmlx_lo,npmlx_hi); padj->pmlx_lo_window[2] = pml_func_lower(i+2,nx,npmlx_lo,npmlx_hi); padj->pmlx_hi_window[2] = pml_func_upper(i+2,nx,npmlx_lo,npmlx_hi); } else { if(i_incr) { padj->pmlx_lo_window[0] = padj->pmlx_lo_window[1]; padj->pmlx_lo_window[1] = padj->pmlx_lo_window[2]; padj->pmlx_lo_window[2] = pml_func_lower(i+2,nx,npmlx_lo,npmlx_hi); padj->pmlx_hi_window[0] = padj->pmlx_hi_window[1]; padj->pmlx_hi_window[1] = padj->pmlx_hi_window[2]; padj->pmlx_hi_window[2] = pml_func_upper(i+2,nx,npmlx_lo,npmlx_hi); } else { padj->pmlx_lo_window[2] = padj->pmlx_lo_window[1]; padj->pmlx_lo_window[1] = padj->pmlx_lo_window[0]; padj->pmlx_lo_window[0] = pml_func_lower(i,nx,npmlx_lo,npmlx_hi); padj->pmlx_hi_window[2] = padj->pmlx_hi_window[1]; padj->pmlx_hi_window[1] = padj->pmlx_hi_window[0]; padj->pmlx_hi_window[0] = pml_func_upper(i,nx,npmlx_lo,npmlx_hi); } } p->pmlx = padj->pmlx_lo_window[1] + 
padj->pmlx_hi_window[1]; #else p->pmlx_lo = pml_func_lower(i+1,nx,npmlx_lo, npmlx_hi); p->pmlx_hi = pml_func_upper(i+1,nx,npmlx_lo, npmlx_hi); p->pmlx = p->pmlx_lo + p->pmlx_hi; #endif } /******************************** Read/write 27pt neighbourhoods of real/complex values ********************************/ inline void load_nbrhoodc( double complex * x, const double * xr, const double * xi, int i, int j, int k, int nx, int ny, int nz, int s, pml_info p ) { x[MMM] = MMM_BDRY(CMPLX( xr[ IDX1D4(i-1,j-1,k-1,s,nx,ny,nz) ], xi[ IDX1D4(i-1,j-1,k-1,s,nx,ny,nz) ] )); x[NMM] = NMM_BDRY(CMPLX( xr[ IDX1D4(i ,j-1,k-1,s,nx,ny,nz) ], xi[ IDX1D4(i ,j-1,k-1,s,nx,ny,nz) ] )); x[PMM] = PMM_BDRY(CMPLX( xr[ IDX1D4(i+1,j-1,k-1,s,nx,ny,nz) ], xi[ IDX1D4(i+1,j-1,k-1,s,nx,ny,nz) ] )); x[MNM] = MNM_BDRY(CMPLX( xr[ IDX1D4(i-1,j ,k-1,s,nx,ny,nz) ], xi[ IDX1D4(i-1,j ,k-1,s,nx,ny,nz) ] )); x[NNM] = NNM_BDRY(CMPLX( xr[ IDX1D4(i ,j ,k-1,s,nx,ny,nz) ], xi[ IDX1D4(i ,j ,k-1,s,nx,ny,nz) ] )); x[PNM] = PNM_BDRY(CMPLX( xr[ IDX1D4(i+1,j ,k-1,s,nx,ny,nz) ], xi[ IDX1D4(i+1,j ,k-1,s,nx,ny,nz) ] )); x[MPM] = MPM_BDRY(CMPLX( xr[ IDX1D4(i-1,j+1,k-1,s,nx,ny,nz) ], xi[ IDX1D4(i-1,j+1,k-1,s,nx,ny,nz) ] )); x[NPM] = NPM_BDRY(CMPLX( xr[ IDX1D4(i ,j+1,k-1,s,nx,ny,nz) ], xi[ IDX1D4(i ,j+1,k-1,s,nx,ny,nz) ] )); x[PPM] = PPM_BDRY(CMPLX( xr[ IDX1D4(i+1,j+1,k-1,s,nx,ny,nz) ], xi[ IDX1D4(i+1,j+1,k-1,s,nx,ny,nz) ] )); x[MMN] = MMN_BDRY(CMPLX( xr[ IDX1D4(i-1,j-1,k ,s,nx,ny,nz) ], xi[ IDX1D4(i-1,j-1,k ,s,nx,ny,nz) ] )); x[NMN] = NMN_BDRY(CMPLX( xr[ IDX1D4(i ,j-1,k ,s,nx,ny,nz) ], xi[ IDX1D4(i ,j-1,k ,s,nx,ny,nz) ] )); x[PMN] = PMN_BDRY(CMPLX( xr[ IDX1D4(i+1,j-1,k ,s,nx,ny,nz) ], xi[ IDX1D4(i+1,j-1,k ,s,nx,ny,nz) ] )); x[MNN] = MNN_BDRY(CMPLX( xr[ IDX1D4(i-1,j ,k ,s,nx,ny,nz) ], xi[ IDX1D4(i-1,j ,k ,s,nx,ny,nz) ] )); x[NNN] = CMPLX( xr[ IDX1D4(i ,j ,k ,s,nx,ny,nz) ], xi[ IDX1D4(i ,j ,k ,s,nx,ny,nz) ] ); x[PNN] = PNN_BDRY(CMPLX( xr[ IDX1D4(i+1,j ,k ,s,nx,ny,nz) ], xi[ IDX1D4(i+1,j ,k ,s,nx,ny,nz) ] )); x[MPN] = 
MPN_BDRY(CMPLX( xr[ IDX1D4(i-1,j+1,k ,s,nx,ny,nz) ], xi[ IDX1D4(i-1,j+1,k ,s,nx,ny,nz) ] )); x[NPN] = NPN_BDRY(CMPLX( xr[ IDX1D4(i ,j+1,k ,s,nx,ny,nz) ], xi[ IDX1D4(i ,j+1,k ,s,nx,ny,nz) ] )); x[PPN] = PPN_BDRY(CMPLX( xr[ IDX1D4(i+1,j+1,k ,s,nx,ny,nz) ], xi[ IDX1D4(i+1,j+1,k ,s,nx,ny,nz) ] )); x[MMP] = MMP_BDRY(CMPLX( xr[ IDX1D4(i-1,j-1,k+1,s,nx,ny,nz) ], xi[ IDX1D4(i-1,j-1,k+1,s,nx,ny,nz) ] )); x[NMP] = NMP_BDRY(CMPLX( xr[ IDX1D4(i ,j-1,k+1,s,nx,ny,nz) ], xi[ IDX1D4(i ,j-1,k+1,s,nx,ny,nz) ] )); x[PMP] = PMP_BDRY(CMPLX( xr[ IDX1D4(i+1,j-1,k+1,s,nx,ny,nz) ], xi[ IDX1D4(i+1,j-1,k+1,s,nx,ny,nz) ] )); x[MNP] = MNP_BDRY(CMPLX( xr[ IDX1D4(i-1,j ,k+1,s,nx,ny,nz) ], xi[ IDX1D4(i-1,j ,k+1,s,nx,ny,nz) ] )); x[NNP] = NNP_BDRY(CMPLX( xr[ IDX1D4(i ,j ,k+1,s,nx,ny,nz) ], xi[ IDX1D4(i ,j ,k+1,s,nx,ny,nz) ] )); x[PNP] = PNP_BDRY(CMPLX( xr[ IDX1D4(i+1,j ,k+1,s,nx,ny,nz) ], xi[ IDX1D4(i+1,j ,k+1,s,nx,ny,nz) ] )); x[MPP] = MPP_BDRY(CMPLX( xr[ IDX1D4(i-1,j+1,k+1,s,nx,ny,nz) ], xi[ IDX1D4(i-1,j+1,k+1,s,nx,ny,nz) ] )); x[NPP] = NPP_BDRY(CMPLX( xr[ IDX1D4(i ,j+1,k+1,s,nx,ny,nz) ], xi[ IDX1D4(i ,j+1,k+1,s,nx,ny,nz) ] )); x[PPP] = PPP_BDRY(CMPLX( xr[ IDX1D4(i+1,j+1,k+1,s,nx,ny,nz) ], xi[ IDX1D4(i+1,j+1,k+1,s,nx,ny,nz) ] )); } inline void nbrhood_update(double complex coef[27],double * yr, double * yi, int i, int j,int k, int nx, int ny, int nz,int s,pml_info p) { MMM_IF( yr[ IDX1D4(i-1,j-1,k-1,s,nx,ny,nz) ] += creal(coef[MMM]); yi[ IDX1D4(i-1,j-1,k-1,s,nx,ny,nz) ] += cimag(coef[MMM]); ) NMM_IF( yr[ IDX1D4(i ,j-1,k-1,s,nx,ny,nz) ] += creal(coef[NMM]); yi[ IDX1D4(i ,j-1,k-1,s,nx,ny,nz) ] += cimag(coef[NMM]); ) PMM_IF( yr[ IDX1D4(i+1,j-1,k-1,s,nx,ny,nz) ] += creal(coef[PMM]); yi[ IDX1D4(i+1,j-1,k-1,s,nx,ny,nz) ] += cimag(coef[PMM]); ) MNM_IF( yr[ IDX1D4(i-1,j ,k-1,s,nx,ny,nz) ] += creal(coef[MNM]); yi[ IDX1D4(i-1,j ,k-1,s,nx,ny,nz) ] += cimag(coef[MNM]); ) NNM_IF( yr[ IDX1D4(i ,j ,k-1,s,nx,ny,nz) ] += creal(coef[NNM]); yi[ IDX1D4(i ,j ,k-1,s,nx,ny,nz) ] += cimag(coef[NNM]); ) PNM_IF( yr[ 
IDX1D4(i+1,j ,k-1,s,nx,ny,nz) ] += creal(coef[PNM]); yi[ IDX1D4(i+1,j ,k-1,s,nx,ny,nz) ] += cimag(coef[PNM]); ) MPM_IF( yr[ IDX1D4(i-1,j+1,k-1,s,nx,ny,nz) ] += creal(coef[MPM]); yi[ IDX1D4(i-1,j+1,k-1,s,nx,ny,nz) ] += cimag(coef[MPM]); ) NPM_IF( yr[ IDX1D4(i ,j+1,k-1,s,nx,ny,nz) ] += creal(coef[NPM]); yi[ IDX1D4(i ,j+1,k-1,s,nx,ny,nz) ] += cimag(coef[NPM]); ) PPM_IF( yr[ IDX1D4(i+1,j+1,k-1,s,nx,ny,nz) ] += creal(coef[PPM]); yi[ IDX1D4(i+1,j+1,k-1,s,nx,ny,nz) ] += cimag(coef[PPM]); ) MMN_IF( yr[ IDX1D4(i-1,j-1,k ,s,nx,ny,nz) ] += creal(coef[MMN]); yi[ IDX1D4(i-1,j-1,k ,s,nx,ny,nz) ] += cimag(coef[MMN]); ) NMN_IF( yr[ IDX1D4(i ,j-1,k ,s,nx,ny,nz) ] += creal(coef[NMN]); yi[ IDX1D4(i ,j-1,k ,s,nx,ny,nz) ] += cimag(coef[NMN]); ) PMN_IF( yr[ IDX1D4(i+1,j-1,k ,s,nx,ny,nz) ] += creal(coef[PMN]); yi[ IDX1D4(i+1,j-1,k ,s,nx,ny,nz) ] += cimag(coef[PMN]); ) MNN_IF( yr[ IDX1D4(i-1,j ,k ,s,nx,ny,nz) ] += creal(coef[MNN]); yi[ IDX1D4(i-1,j ,k ,s,nx,ny,nz) ] += cimag(coef[MNN]); ) yr[ IDX1D4(i ,j ,k ,s,nx,ny,nz) ] += creal(coef[NNN]); yi[ IDX1D4(i ,j ,k ,s,nx,ny,nz) ] += cimag(coef[NNN]); PNN_IF( yr[ IDX1D4(i+1,j ,k ,s,nx,ny,nz) ] += creal(coef[PNN]); yi[ IDX1D4(i+1,j ,k ,s,nx,ny,nz) ] += cimag(coef[PNN]); ) MPN_IF( yr[ IDX1D4(i-1,j+1,k ,s,nx,ny,nz) ] += creal(coef[MPN]); yi[ IDX1D4(i-1,j+1,k ,s,nx,ny,nz) ] += cimag(coef[MPN]); ) NPN_IF( yr[ IDX1D4(i ,j+1,k ,s,nx,ny,nz) ] += creal(coef[NPN]); yi[ IDX1D4(i ,j+1,k ,s,nx,ny,nz) ] += cimag(coef[NPN]); ) PPN_IF( yr[ IDX1D4(i+1,j+1,k ,s,nx,ny,nz) ] += creal(coef[PPN]); yi[ IDX1D4(i+1,j+1,k ,s,nx,ny,nz) ] += cimag(coef[PPN]); ) MMP_IF( yr[ IDX1D4(i-1,j-1,k+1,s,nx,ny,nz) ] += creal(coef[MMP]); yi[ IDX1D4(i-1,j-1,k+1,s,nx,ny,nz) ] += cimag(coef[MMP]); ) NMP_IF( yr[ IDX1D4(i ,j-1,k+1,s,nx,ny,nz) ] += creal(coef[NMP]); yi[ IDX1D4(i ,j-1,k+1,s,nx,ny,nz) ] += cimag(coef[NMP]); ) PMP_IF( yr[ IDX1D4(i+1,j-1,k+1,s,nx,ny,nz) ] += creal(coef[PMP]); yi[ IDX1D4(i+1,j-1,k+1,s,nx,ny,nz) ] += cimag(coef[PMP]); ) MNP_IF( yr[ IDX1D4(i-1,j ,k+1,s,nx,ny,nz) 
] += creal(coef[MNP]); yi[ IDX1D4(i-1,j ,k+1,s,nx,ny,nz) ] += cimag(coef[MNP]); ) NNP_IF( yr[ IDX1D4(i ,j ,k+1,s,nx,ny,nz) ] += creal(coef[NNP]); yi[ IDX1D4(i ,j ,k+1,s,nx,ny,nz) ] += cimag(coef[NNP]); ) PNP_IF( yr[ IDX1D4(i+1,j ,k+1,s,nx,ny,nz) ] += creal(coef[PNP]); yi[ IDX1D4(i+1,j ,k+1,s,nx,ny,nz) ] += cimag(coef[PNP]); ) MPP_IF( yr[ IDX1D4(i-1,j+1,k+1,s,nx,ny,nz) ] += creal(coef[MPP]); yi[ IDX1D4(i-1,j+1,k+1,s,nx,ny,nz) ] += cimag(coef[MPP]); ) NPP_IF( yr[ IDX1D4(i ,j+1,k+1,s,nx,ny,nz) ] += creal(coef[NPP]); yi[ IDX1D4(i ,j+1,k+1,s,nx,ny,nz) ] += cimag(coef[NPP]); ) PPP_IF( yr[ IDX1D4(i+1,j+1,k+1,s,nx,ny,nz) ] += creal(coef[PPP]); yi[ IDX1D4(i+1,j+1,k+1,s,nx,ny,nz) ] += cimag(coef[PPP]); ) } inline void load_nbrhoodr( double * x, const double * xr, int i, int j, int k, int nx, int ny, int nz, pml_info p ) { x[MMM] = MMM_BDRY(xr[ IDX1D4(i-1,j-1,k-1,0,nx,ny,nz) ]); x[NMM] = NMM_BDRY(xr[ IDX1D4(i ,j-1,k-1,0,nx,ny,nz) ]); x[PMM] = PMM_BDRY(xr[ IDX1D4(i+1,j-1,k-1,0,nx,ny,nz) ]); x[MNM] = MNM_BDRY(xr[ IDX1D4(i-1,j ,k-1,0,nx,ny,nz) ]); x[NNM] = NNM_BDRY(xr[ IDX1D4(i ,j ,k-1,0,nx,ny,nz) ]); x[PNM] = PNM_BDRY(xr[ IDX1D4(i+1,j ,k-1,0,nx,ny,nz) ]); x[MPM] = MPM_BDRY(xr[ IDX1D4(i-1,j+1,k-1,0,nx,ny,nz) ]); x[NPM] = NPM_BDRY(xr[ IDX1D4(i ,j+1,k-1,0,nx,ny,nz) ]); x[PPM] = PPM_BDRY(xr[ IDX1D4(i+1,j+1,k-1,0,nx,ny,nz) ]); x[MMN] = MMN_BDRY(xr[ IDX1D4(i-1,j-1,k ,0,nx,ny,nz) ]); x[NMN] = NMN_BDRY(xr[ IDX1D4(i ,j-1,k ,0,nx,ny,nz) ]); x[PMN] = PMN_BDRY(xr[ IDX1D4(i+1,j-1,k ,0,nx,ny,nz) ]); x[MNN] = MNN_BDRY(xr[ IDX1D4(i-1,j ,k ,0,nx,ny,nz) ]); x[NNN] = xr[ IDX1D4(i ,j ,k ,0,nx,ny,nz) ]; x[PNN] = PNN_BDRY(xr[ IDX1D4(i+1,j ,k ,0,nx,ny,nz) ]); x[MPN] = MPN_BDRY(xr[ IDX1D4(i-1,j+1,k ,0,nx,ny,nz) ]); x[NPN] = NPN_BDRY(xr[ IDX1D4(i ,j+1,k ,0,nx,ny,nz) ]); x[PPN] = PPN_BDRY(xr[ IDX1D4(i+1,j+1,k ,0,nx,ny,nz) ]); x[MMP] = MMP_BDRY(xr[ IDX1D4(i-1,j-1,k+1,0,nx,ny,nz) ]); x[NMP] = NMP_BDRY(xr[ IDX1D4(i ,j-1,k+1,0,nx,ny,nz) ]); x[PMP] = PMP_BDRY(xr[ IDX1D4(i+1,j-1,k+1,0,nx,ny,nz) ]); x[MNP] = 
MNP_BDRY(xr[ IDX1D4(i-1,j ,k+1,0,nx,ny,nz) ]); x[NNP] = NNP_BDRY(xr[ IDX1D4(i ,j ,k+1,0,nx,ny,nz) ]); x[PNP] = PNP_BDRY(xr[ IDX1D4(i+1,j ,k+1,0,nx,ny,nz) ]); x[MPP] = MPP_BDRY(xr[ IDX1D4(i-1,j+1,k+1,0,nx,ny,nz) ]); x[NPP] = NPP_BDRY(xr[ IDX1D4(i ,j+1,k+1,0,nx,ny,nz) ]); x[PPP] = PPP_BDRY(xr[ IDX1D4(i+1,j+1,k+1,0,nx,ny,nz) ]); } inline void load_wn_nbrhood(wn_type wn_window[27],const double * wnr,const double * wni, int i, int j, int k, int nx, int ny, int nz, pml_info p) { #ifdef WNCMPLX load_nbrhoodc(wn_window,wnr,wni,i,j,k,nx,ny,nz,0,p); #else load_nbrhoodr(wn_window,wnr,i,j,k,nx,ny,nz,p); #endif } /******************************** Coefficient computation functions ********************************/ inline coef_consts compute_coef_consts(const double * h) { /* Compute constants associated to stencil computations */ double hx = h[0]; double hy = h[1]; double hz = h[2]; double hx2 = hx*hx, hy2 = hy*hy, hz2 = hz*hz; double hxy = hx2 +hy2; double hxz = hx2 + hz2; double hyz = hy2 + hz2; double hxyz = hx2 + hy2 + hz2; double W3A = (W3)*3/(4*hxyz); double W3A_2 = 2*W3A; double wn_coef = -(W1 + 3*W2 + 16*W3A*hxyz/3 + WM1-1); double wn_xcoef = (W1/hx2 + W2/hx2 + W2/hxz + W2/hxy + 8*W3A); double wn_ycoef = (W1/hy2 + W2/hy2 + W2/hyz + W2/hxy + 8*W3A); double wn_zcoef = (W1/hz2 + W2/hz2 + W2/hxz + W2/hyz + 8*W3A); double pmlx_coef = -(W1/hx2 + W2/hx2 + W2/hxz + W2/hxy + 8*W3A); double pmly_coef = -(W1/hy2 + W2/hy2 + W2/hyz + W2/hxy + 8*W3A); double pmlz_coef = -(W1/hz2 + W2/hz2 + W2/hxz + W2/hyz + 8*W3A); double xz_coef = W2/(2*hxz); double xy_coef = W2/(2*hxy); double yz_coef = W2/(2*hyz); coef_consts c = { .W3A_2 = W3A_2, .wn_coef = wn_coef, .wn_xcoef = wn_xcoef, .wn_ycoef = wn_ycoef, .wn_zcoef = wn_zcoef, .pmlx_coef = pmlx_coef, .pmly_coef = pmly_coef, .pmlz_coef = pmlz_coef, .xz_coef = xz_coef, .xy_coef = xy_coef, .yz_coef = yz_coef }; return c; } inline void get_coefs(double complex coef[27], const wn_type wn_window[27], const coef_consts c, const pml_info p, const 
pml_adj_info padj) { #ifndef ADJ /* Compute coefficients - forward mode */ coef[MMM] = MMM_BDRY(- WM4*wn_window[MMM] + non_deriv_mode*(-c.W3A_2*( p.pmlx_lo + p.pmly_lo + p.pmlz_lo ))); coef[NMM] = NMM_BDRY(- WM3*wn_window[NMM] + non_deriv_mode*(-c.yz_coef * (p.pmlz_lo + p.pmly_lo) + c.W3A_2*p.pmlx)); coef[PMM] = PMM_BDRY(- WM4*wn_window[PMM] + non_deriv_mode*(-c.W3A_2*( p.pmlx_hi + p.pmly_lo + p.pmlz_lo ))); coef[MNM] = MNM_BDRY( - WM3*wn_window[MNM] + non_deriv_mode*(-c.xz_coef * (p.pmlz_lo + p.pmlx_lo) + c.W3A_2*p.pmly)); coef[NNM] = NNM_BDRY(- WM2*wn_window[NNM] + non_deriv_mode*(c.pmlz_coef*p.pmlz_lo + c.yz_coef*p.pmly + c.xz_coef*p.pmlx)); coef[PNM] = PNM_BDRY(- WM3*wn_window[PNM] + non_deriv_mode*(-c.xz_coef * (p.pmlz_lo + p.pmlx_hi) + c.W3A_2*p.pmly)); coef[MPM] = MPM_BDRY(- WM4*wn_window[MPM] + non_deriv_mode*(-c.W3A_2*( p.pmlx_lo + p.pmly_hi + p.pmlz_lo ))); coef[NPM] = NPM_BDRY(- WM3*wn_window[NPM] + non_deriv_mode*(-c.yz_coef * (p.pmly_hi + p.pmlz_lo) + c.W3A_2*p.pmlx)); coef[PPM] = PPM_BDRY(- WM4*wn_window[PPM] + non_deriv_mode*(-c.W3A_2*( p.pmlx_hi + p.pmly_hi + p.pmlz_lo ))); coef[MMN] = MMN_BDRY(- WM3*wn_window[MMN] + non_deriv_mode*(-c.xy_coef * (p.pmlx_lo + p.pmly_lo) + c.W3A_2*p.pmlz)); coef[NMN] = NMN_BDRY(- WM2*wn_window[NMN] + non_deriv_mode*(c.pmly_coef*p.pmly_lo + c.yz_coef*p.pmlz + c.xy_coef*p.pmlx)); coef[PMN] = PMN_BDRY(- WM3*wn_window[PMN] + non_deriv_mode*(-c.xy_coef * (p.pmlx_hi + p.pmly_lo) + c.W3A_2*p.pmlz)); coef[MNN] = MNN_BDRY(- WM2*wn_window[MNN]+non_deriv_mode*(c.pmlx_coef*p.pmlx_lo + c.xz_coef*p.pmlz + c.xy_coef*p.pmly)); coef[NNN] = c.wn_coef*wn_window[NNN] + non_deriv_mode*(c.wn_xcoef*p.pmlx + c.wn_ycoef*p.pmly + c.wn_zcoef*p.pmlz); coef[PNN] = PNN_BDRY(- WM2*wn_window[PNN] + non_deriv_mode*(c.pmlx_coef*p.pmlx_hi + c.xz_coef*p.pmlz + c.xy_coef*p.pmly)); coef[MPN] = MPN_BDRY(- WM3*wn_window[MPN] + non_deriv_mode*(-c.xy_coef * (p.pmlx_lo + p.pmly_hi) + c.W3A_2*p.pmlz)); coef[NPN] = NPN_BDRY(- WM2*wn_window[NPN] + 
non_deriv_mode*(c.pmly_coef*p.pmly_hi + c.yz_coef*p.pmlz + c.xy_coef*p.pmlx)); coef[PPN] = PPN_BDRY(- WM3*wn_window[PPN] + non_deriv_mode*(-c.xy_coef * (p.pmlx_hi + p.pmly_hi) + c.W3A_2*p.pmlz)); coef[MMP] = MMP_BDRY(- WM4*wn_window[MMP] + non_deriv_mode*(-c.W3A_2*( p.pmlx_lo + p.pmly_lo + p.pmlz_hi ))); coef[NMP] = NMP_BDRY(- WM3*wn_window[NMP] + non_deriv_mode*(-c.yz_coef * (p.pmly_lo + p.pmlz_hi) + c.W3A_2*p.pmlx)); coef[PMP] = PMP_BDRY(- WM4*wn_window[PMP] + non_deriv_mode*(-c.W3A_2*( p.pmlx_hi + p.pmly_lo + p.pmlz_hi ))); coef[MNP] = MNP_BDRY(- WM3*wn_window[MNP] + non_deriv_mode*(-c.xz_coef * (p.pmlz_hi + p.pmlx_lo) + c.W3A_2*p.pmly)); coef[NNP] = NNP_BDRY(- WM2*wn_window[NNP] + non_deriv_mode*(c.pmlz_coef*p.pmlz_hi + c.yz_coef*p.pmly + c.xz_coef*p.pmlx)); coef[PNP] = PNP_BDRY(- WM3*wn_window[PNP] + non_deriv_mode*(-c.xz_coef * (p.pmlz_hi + p.pmlx_hi) + c.W3A_2*p.pmly)); coef[MPP] = MPP_BDRY(- WM4*wn_window[MPP] + non_deriv_mode*(-c.W3A_2*( p.pmlx_lo + p.pmly_hi + p.pmlz_hi ))); coef[NPP] = NPP_BDRY(- WM3*wn_window[NPP] + non_deriv_mode*(-c.yz_coef * (p.pmlz_hi + p.pmly_hi) + c.W3A_2*p.pmlx)); coef[PPP] = PPP_BDRY(- WM4*wn_window[PPP] + non_deriv_mode*(-c.W3A_2*( p.pmlx_hi + p.pmly_hi + p.pmlz_hi ))); #else /* Compute coefficients - adjoint mode */ coef[MMM] = - WM4*wn_window[NNN]; coef[NMM] = - WM3*wn_window[NNN]; coef[PMM] = - WM4*wn_window[NNN]; coef[MNM] = - WM3*wn_window[NNN]; coef[NNM] = - WM2*wn_window[NNN]; coef[PNM] = - WM3*wn_window[NNN]; coef[MPM] = - WM4*wn_window[NNN]; coef[NPM] = - WM3*wn_window[NNN]; coef[PPM] = - WM4*wn_window[NNN]; coef[MMN] = - WM3*wn_window[NNN]; coef[NMN] = - WM2*wn_window[NNN]; coef[PMN] = - WM3*wn_window[NNN]; coef[MNN] = - WM2*wn_window[NNN]; coef[NNN] = c.wn_coef*wn_window[NNN]; coef[PNN] = - WM2*wn_window[NNN]; coef[MPN] = - WM3*wn_window[NNN]; coef[NPN] = - WM2*wn_window[NNN]; coef[PPN] = - WM3*wn_window[NNN]; coef[MMP] = - WM4*wn_window[NNN]; coef[NMP] = - WM3*wn_window[NNN]; coef[PMP] = - WM4*wn_window[NNN]; 
coef[MNP] = - WM3*wn_window[NNN]; coef[NNP] = - WM2*wn_window[NNN]; coef[PNP] = - WM3*wn_window[NNN]; coef[MPP] = - WM4*wn_window[NNN]; coef[NPP] = - WM3*wn_window[NNN]; coef[PPP] = - WM4*wn_window[NNN]; #ifndef DERIV // (+1,0,0) coef for (i-1,j,k) coef[MNN] += c.pmlx_coef*padj.pmlx_hi_window[0] + c.xz_coef*p.pmlz + c.xy_coef*p.pmly; // (-1,0,0) coef for (i+1,j,k) coef[PNN] += c.pmlx_coef*padj.pmlx_lo_window[2] + c.xz_coef*p.pmlz + c.xy_coef*p.pmly; // (0,+1,0) coef for (i,j-1,k) coef[NMN] += c.pmly_coef*padj.pmly_hi_window[0] + c.yz_coef*p.pmlz + c.xy_coef*p.pmlx; // (0,-1,0) coef for (i,j+1,k) coef[NPN] += c.pmly_coef*padj.pmly_lo_window[2] + c.yz_coef*p.pmlz + c.xy_coef*p.pmlx; // (0,0,+1) coef for (i,j,k-1) coef[NNM] += c.pmlz_coef*padj.pmlz_hi_window[0] + c.yz_coef*p.pmly + c.xz_coef*p.pmlx; // (0,0,-1) coef for (i,j,k+1) coef[NNP] += c.pmlz_coef*padj.pmlz_lo_window[2] + c.yz_coef*p.pmly + c.xz_coef*p.pmlx; // (0,-1,-1) coef for (i,j+1,k+1) coef[NPP] += -c.yz_coef * (padj.pmlz_lo_window[2] + padj.pmly_lo_window[2]) + c.W3A_2*p.pmlx; // (0,+1,+1) coef for (i,j-1,k-1) coef[NMM] += -c.yz_coef * (padj.pmlz_hi_window[0] + padj.pmly_hi_window[0]) + c.W3A_2*p.pmlx; // (0,-1,+1) coef for (i,j+1,j-1) coef[NPM] += -c.yz_coef * (padj.pmly_lo_window[2] + padj.pmlz_hi_window[0]) + c.W3A_2*p.pmlx; // (0,+1,-1) coef for (i,j-1,j+1) coef[NMP] += -c.yz_coef * (padj.pmly_hi_window[0] + padj.pmlz_lo_window[2]) + c.W3A_2*p.pmlx; // (-1,0,-1) coef for (i+1,j,k+1) coef[PNP] += -c.xz_coef * (padj.pmlz_lo_window[2] + padj.pmlx_lo_window[2]) + c.W3A_2*p.pmly; // (+1,0,+1) coef for (i-1,j,k-1) coef[MNM] += -c.xz_coef * (padj.pmlz_hi_window[0] + padj.pmlx_hi_window[0]) + c.W3A_2*p.pmly; // (-1,0,+1) coef for (i+1,j,k-1) coef[PNM] += -c.xz_coef * (padj.pmlz_hi_window[0] + padj.pmlx_lo_window[2]) + c.W3A_2*p.pmly; // (+1,0,-1) coef for (i-1,j,k+1) coef[MNP] += -c.xz_coef * (padj.pmlz_lo_window[2] + padj.pmlx_hi_window[0]) + c.W3A_2*p.pmly; // (-1,+1,0) coef for (i+1,j-1,k) coef[PMN] += 
-c.xy_coef * (padj.pmlx_lo_window[2] + padj.pmly_hi_window[0]) + c.W3A_2*p.pmlz; // (+1,-1,0) coef for (i-1,j+1,k) coef[MPN] += -c.xy_coef * (padj.pmlx_hi_window[0] + padj.pmly_lo_window[2]) + c.W3A_2*p.pmlz; // (-1,-1,0) coef for (i+1,j+1,k) coef[PPN] += -c.xy_coef * (padj.pmlx_lo_window[2] + padj.pmly_lo_window[2]) + c.W3A_2*p.pmlz; // (+1,+1,0) coef for (i-1,j-1,k) coef[MMN] += -c.xy_coef * (padj.pmlx_hi_window[0] + padj.pmly_hi_window[0]) + c.W3A_2*p.pmlz; // (-1,-1,-1) coef for (i+1,j+1,k+1) coef[PPP] += -c.W3A_2*( padj.pmlx_lo_window[2] + padj.pmly_lo_window[2] + padj.pmlz_lo_window[2] ); // (+1,+1,+1) coef for (i-1,j-1,k-1) coef[MMM] += -c.W3A_2*( padj.pmlx_hi_window[0] + padj.pmly_hi_window[0] + padj.pmlz_hi_window[0] ); // (-1,-1,+1) coef for (i+1,j+1,k-1) coef[PPM] += -c.W3A_2*( padj.pmlx_lo_window[2] + padj.pmly_lo_window[2] + padj.pmlz_hi_window[0] ); // (+1,+1,-1) coef for (i-1,j-1,k+1) coef[MMP] += -c.W3A_2*( padj.pmlx_hi_window[0] + padj.pmly_hi_window[0] + padj.pmlz_lo_window[2] ); // (-1,+1,-1) coef for (i+1,j-1,k+1) coef[PMP] += -c.W3A_2*( padj.pmlx_lo_window[2] + padj.pmly_hi_window[0] + padj.pmlz_lo_window[2] ); // (+1,-1,+1) coef for (i-1,j+1,k-1) coef[MPM] += -c.W3A_2*( padj.pmlx_hi_window[0] + padj.pmly_lo_window[2] + padj.pmlz_hi_window[0] ); // (-1,+1,+1) coef for (i+1,j-1,k-1) coef[PMM] += -c.W3A_2*( padj.pmlx_lo_window[2] + padj.pmly_hi_window[0] + padj.pmlz_hi_window[0] ); // (+1,-1,-1) coef for (i-1,j+1,k+1) coef[MPP] += -c.W3A_2*( padj.pmlx_hi_window[0] + padj.pmly_lo_window[2] + padj.pmlz_lo_window[2] ); // (0,0,0) coef for (i,j,k) coef[NNN] += c.wn_xcoef*p.pmlx + c.wn_ycoef*p.pmly + c.wn_zcoef*p.pmlz; #endif #endif // Boundary handling #ifdef ADJ for(int t=0; t<27; t++) { coef[t] = conj(coef[t]); } #endif } void do_Hmvp( const double * wnr, const double * wni, const double * h, const double * n, const double * npml, double *yr, double *yi, const double *xr, const double *xi, int nthreads) { int i,j,k,kout,t; int q; int nx = 
(int)n[0]; int ny = (int)n[1]; int nz = (int)n[2]; int npmlx_lo = (int)npml[0]; int npmlx_hi = (int)npml[1]; int npmly_lo = (int)npml[2]; int npmly_hi = (int)npml[3]; int npmlz_lo = (int)npml[4]; int npmlz_hi = (int)npml[5]; coef_consts c = compute_coef_consts(h); int pmlz_alloc = 0; int pmly_alloc = 0; int pmlx_alloc = 0; double complex coef[27]; double complex x[27]; wn_type wn_window[27]; pml_info p; pml_adj_info padj; double complex y_out; p.x_hasL = 0; p.x_hasR = 1; p.y_hasL = 0; p.y_hasR = 1; p.z_hasR = 0; p.z_hasR = 1; #pragma omp parallel for schedule(static) private(coef,x,wn_window,y_out,kout,i,j,k,t,q) firstprivate(p,padj,pmlx_alloc,pmly_alloc,pmlz_alloc) num_threads(nthreads) xyzloop_up( /* Cache a window of the wavenumber around the current point */ load_wn_nbrhood(wn_window,wnr,wni,i,j,k,nx,ny,nz,p); /* Get coefficients */ get_coefs(coef,wn_window, c, p, padj); /* Cache a window of the wavefield around the current point */ load_nbrhoodc(x,xr,xi,i,j,k,nx,ny,nz,0,p); kout = IDX1D3(i,j,k,nx,ny,nz); y_out = 0.0 + 0.0*I; for(t=0; t<27; t++){ y_out += coef[t] * x[t]; } yr[kout] = creal(y_out); yi[kout] = cimag(y_out); ) }
dof_updater.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Jordi Cotela // #if !defined(KRATOS_DOF_UPDATER_H_INCLUDED ) #define KRATOS_DOF_UPDATER_H_INCLUDED // Project includes #include "includes/define.h" #include "includes/model_part.h" namespace Kratos { ///@addtogroup KratosCore ///@{ ///@name Kratos Classes ///@{ /// Utility class to update the values of degree of freedom (Dof) variables after solving the system. /** This class encapsulates the operation of updating nodal degrees of freedom after a system solution. * In pseudo-code, the operation to be performed is * for each dof: dof.variable += dx[dof.equation_id] * This operation is a simple loop in shared memory, but requires additional infrastructure in MPI, * to obtain out-of-process update data. DofUpdater takes care of both the operation and the eventual * auxiliary infrastructure. * @see TrilinosDofUpdater for the trilinos version. */ template< class TSparseSpace > class DofUpdater { public: ///@name Type Definitions ///@{ /// Pointer definition of DofUpdater KRATOS_CLASS_POINTER_DEFINITION(DofUpdater); using DofType = Dof<typename TSparseSpace::DataType>; using DofsArrayType = PointerVectorSet< DofType, SetIdentityFunction<DofType>, std::less<typename SetIdentityFunction<DofType>::result_type>, std::equal_to<typename SetIdentityFunction<DofType>::result_type>, DofType* >; using SystemVectorType = typename TSparseSpace::VectorType; ///@} ///@name Life Cycle ///@{ /// Default constructor. DofUpdater(){} /// Deleted copy constructor DofUpdater(DofUpdater const& rOther) = delete; /// Destructor. virtual ~DofUpdater(){} /// Deleted assignment operator DofUpdater& operator=(DofUpdater const& rOther) = delete; ///@} ///@name Operations ///@{ /// Create a new instance of this class. 
/** This function is used by the SparseSpace class to create new * DofUpdater instances of the appropriate type. * @return a std::unique_pointer to the new instance. * @see UblasSpace::CreateDofUpdater(), TrilinosSpace::CreateDofUpdater(). */ virtual typename DofUpdater::UniquePointer Create() const { return Kratos::make_unique<DofUpdater>(); } /// Initialize the DofUpdater in preparation for a subsequent UpdateDofs call. /** Note that the base DofUpdater does not have internal data, so this does nothing. * @param[in] rDofSet The list of degrees of freedom. * @param[in] rDx The update vector. */ virtual void Initialize( const DofsArrayType& rDofSet, const SystemVectorType& rDx) {} /// Free internal storage to reset the instance and/or optimize memory consumption. /** Note that the base DofUpdater does not have internal data, so this does nothing. */ virtual void Clear() {} /// Calculate new values for the problem's degrees of freedom using the update vector rDx. /** For each Dof in rDofSet, this function calculates the updated value for the corresponding * variable as value += rDx[dof.EquationId()]. * @param[in/out] rDofSet The list of degrees of freedom. * @param[in] rDx The update vector. * This method will check if Initialize() was called before and call it if necessary. */ virtual void UpdateDofs( DofsArrayType& rDofSet, const SystemVectorType& rDx) { const int num_dof = static_cast<int>(rDofSet.size()); #pragma omp parallel for for(int i = 0; i < num_dof; ++i) { auto it_dof = rDofSet.begin() + i; if (it_dof->IsFree()) it_dof->GetSolutionStepValue() += TSparseSpace::GetValue(rDx,it_dof->EquationId()); } } /// Assign new values for the problem's degrees of freedom using the vector rX. /** For each Dof in rDofSet, this function assigns the value for the corresponding * variable as value = rX[dof.EquationId()]. * @param[in/out] rDofSet The list of degrees of freedom. * @param[in] rX The solution vector. 
* This method will check if Initialize() was called before and call it if necessary. */ virtual void AssignDofs(DofsArrayType& rDofSet, const SystemVectorType& rX) { const int num_dof = static_cast<int>(rDofSet.size()); #pragma omp parallel for for(int i = 0; i < num_dof; ++i) { auto it_dof = rDofSet.begin() + i; if (it_dof->IsFree()) it_dof->GetSolutionStepValue() = TSparseSpace::GetValue(rX,it_dof->EquationId()); } } ///@} ///@name Input and output ///@{ /// Turn back information as a string. virtual std::string Info() const { std::stringstream buffer; buffer << "DofUpdater" ; return buffer.str(); } /// Print information about this object. virtual void PrintInfo(std::ostream& rOStream) const { rOStream << this->Info() << std::endl; } /// Print object's data. virtual void PrintData(std::ostream& rOStream) const { rOStream << this->Info() << std::endl; } ///@} }; // Class DofUpdater ///@} ///@name Input and output ///@{ /// input stream function template< class TSparseSpace > inline std::istream& operator >> ( std::istream& rIStream, DofUpdater<TSparseSpace>& rThis) { return rIStream; } /// output stream function template< class TSparseSpace > inline std::ostream& operator << ( std::ostream& rOStream, const DofUpdater<TSparseSpace>& rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } ///@} ///@} addtogroup block } // namespace Kratos. #endif // KRATOS_DOF_UPDATER_H_INCLUDED defined
GB_binop__pow_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__pow_int8 // A.*B function (eWiseMult): GB_AemultB__pow_int8 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__pow_int8 // C+=b function (dense accum): GB_Cdense_accumb__pow_int8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pow_int8 // C=scalar+B GB_bind1st__pow_int8 // C=scalar+B' GB_bind1st_tran__pow_int8 // C=A+scalar GB_bind2nd__pow_int8 // C=A'+scalar GB_bind2nd_tran__pow_int8 // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = GB_pow_int8 (aij, bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = 
Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = GB_pow_int8 (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_INT8 || GxB_NO_POW_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
//------------------------------------------------------------------------------
// NOTE(review): machine-generated GraphBLAS kernel file, specialized for the
// binary operator z = pow (x,y) on int8_t.  Most function bodies are brought
// in from shared templates via #include, so the real logic lives in the
// template files; regenerate this file rather than hand-editing it.
//------------------------------------------------------------------------------

// Dense ewise3 kernel with accumulation.  The "(none)" name is a generator
// placeholder; the #if that disables this region opens above this chunk.
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__pow_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__pow_int8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__pow_int8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): generated dead code -- the return above always fires first
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

// Disabled region: the generator emitted the "(none)" placeholder because no
// colscale kernel exists for this operator; compiled out by the #if 0.
#if 0
GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Disabled region; "(node)" appears to be a generator artifact for "(none)"
// -- harmless, since this region is compiled out.  TODO confirm upstream.
#if 0
GrB_Info (node)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__pow_int8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__pow_int8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__pow_int8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    // Cx [p] = pow (x, Bx [p]) for every entry, in parallel
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int8_t bij = Bx [p] ;
        Cx [p] = GB_pow_int8 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__pow_int8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    // Cx [p] = pow (Ax [p], y) for every entry, in parallel
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int8_t aij = Ax [p] ;
        Cx [p] = GB_pow_int8 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    int8_t aij = Ax [pA] ;                  \
    Cx [pC] = GB_pow_int8 (x, aij) ;        \
}

GrB_Info GB_bind1st_tran__pow_int8
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    int8_t aij = Ax [pA] ;                  \
    Cx [pC] = GB_pow_int8 (aij, y) ;        \
}

GrB_Info GB_bind2nd_tran__pow_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
farfield.c
#include <stdlib.h> #include <complex.h> #include <math.h> #include <string.h> #include "fastsphere.h" #include "farfield.h" #include "util.h" /* Compute the required order of the root sphere. */ int rootorder (spscat *slist, int nsph, complex double bgk, int ndig) { int i, l; double rad = 0.0, clen; for (i = 0; i < nsph; ++i) { /* The radius of the center of the sphere. */ clen = sqrt(DVDOT(slist[i].cen, slist[i].cen)); /* Make this a radius enclosing all of the current sphere. */ clen += (slist[i].spdesc)->r; /* Find the maxium such radius. */ rad = MAX(rad, clen); } /* Use the excess bandwidth formula to find the number of terms. */ l = exband (bgk * rad, ndig); return l; } /* Shift and combine outgoing plane waves from small spheres into one large * plane-wave pattern. */ int neartofar (complex double *vout, complex double *vin, spscat *slist, int nsph, complex double bgk, shdata *shout, shdata *shin) { int ntin, ntout; double dphi; ntin = shin->ntheta * shin->nphi; ntout = shout->ntheta * shout->nphi; dphi = 2 * M_PI / MAX(shout->nphi, 1); memset (vout, 0, ntout * sizeof(complex double)); #pragma omp parallel default(shared) { int i, j, k, l; double s[3], sdc, sth, phi; complex double *vp, sfact, *buf; spscat *sp; buf = malloc (ntout * sizeof(complex double)); #pragma omp for for (i = 0; i < nsph; ++i) { sp = slist + i; vp = vin + i * ntin; /* Interpolate then spherical scattered field. */ ffsht (vp, shin, sp->spdesc->deg); for (j = 0; j < ntout; ++j) buf[j] = 0; copysh (sp->spdesc->deg, buf, shout->nphi, vp, shin->nphi); ifsht (vp, shin, sp->spdesc->deg); ifsht (buf, shout, sp->spdesc->deg); /* Add the phase-shifted sphere pattern to the total pattern. */ for (j = 0, l = 0; j < shout->ntheta; ++j) { s[2] = shout->theta[j]; sth = sin(acos(shout->theta[j])); for (k = 0; k < shout->nphi; ++k, ++l) { phi = k * dphi; s[0] = sth * cos(phi); s[1] = sth * sin(phi); /* Compute the phase-shift factor. 
*/ sdc = DVDOT(s, slist[i].cen); sfact = cexp (-I * bgk * sdc); /* Augment the pattern, with synchronization. */ #pragma omp critical(outrad) vout[l] += sfact * buf[l]; } } } free (buf); } return ntout; } /* Anterpolate and distribute an incoming field to smaller spheres. Input is * a plane-wave expansion, output is plane-wave expansion. */ int fartonear (complex double *vout, complex double *vin, spscat *slist, int nsph, complex double bgk, shdata *shout, shdata *shin) { int ntin, ntout; double dphi; ntin = shin->ntheta * shin->nphi; ntout = shout->ntheta * shout->nphi; dphi = 2 * M_PI / MAX(shin->nphi, 1); #pragma omp parallel default(shared) { int i, j, k, l; double s[3], sdc, sth, phi; complex double *vp, *buf; spscat *sp; buf = malloc (ntin * sizeof(complex double)); #pragma omp for for (i = 0; i < nsph; ++i) { sp = slist + i; vp = vout + i * ntout; /* Shift the phase of the sphere pattern. */ for (j = 0, l = 0; j < shin->ntheta; ++j) { s[2] = shin->theta[j]; sth = sin(acos(shin->theta[j])); for (k = 0; k < shin->nphi; ++k, ++l) { phi = k * dphi; s[0] = sth * cos(phi); s[1] = sth * sin(phi); /* Compute the phase-shift factor. */ sdc = DVDOT(s, slist[i].cen); buf[l] = vin[l] * cexp (I * bgk * sdc); } } ffsht (buf, shin, sp->spdesc->deg); for (j = 0; j < ntout; ++j) vp[j] = 0; copysh (sp->spdesc->deg, vp, shout->nphi, buf, shin->nphi); ifsht (vp, shout, sp->spdesc->deg); } free (buf); } return ntout; }
segment.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS EEEEE GGGG M M EEEEE N N TTTTT % % SS E G MM MM E NN N T % % SSS EEE G GGG M M M EEE N N N T % % SS E G G M M E N NN T % % SSSSS EEEEE GGGG M M EEEEE N N T % % % % % % MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means % % % % Software Design % % Cristy % % April 1993 % % % % % % Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Segment segments an image by analyzing the histograms of the color % components and identifying units that are homogeneous with the fuzzy % c-means technique. The scale-space filter analyzes the histograms of % the three color components of the image and identifies a set of % classes. The extents of each class is used to coarsely segment the % image with thresholding. The color associated with each class is % determined by the mean color of all pixels within the extents of a % particular class. Finally, any unclassified pixels are assigned to % the closest class with the fuzzy c-means technique. % % The fuzzy c-Means algorithm can be summarized as follows: % % o Build a histogram, one for each color component of the image. 
% % o For each histogram, successively apply the scale-space filter and % build an interval tree of zero crossings in the second derivative % at each scale. Analyze this scale-space ''fingerprint'' to % determine which peaks and valleys in the histogram are most % predominant. % % o The fingerprint defines intervals on the axis of the histogram. % Each interval contains either a minima or a maxima in the original % signal. If each color component lies within the maxima interval, % that pixel is considered ''classified'' and is assigned an unique % class number. % % o Any pixel that fails to be classified in the above thresholding % pass is classified using the fuzzy c-Means technique. It is % assigned to one of the classes discovered in the histogram analysis % phase. % % The fuzzy c-Means technique attempts to cluster a pixel by finding % the local minima of the generalized within group sum of squared error % objective function. A pixel is assigned to the closest class of % which the fuzzy membership has a maximum value. % % Segment is strongly based on software written by Andy Gallo, % University of Delaware. % % The following reference was used in creating this program: % % Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation % Algorithm Based on the Thresholding and the Fuzzy c-Means % Techniques", Pattern Recognition, Volume 23, Number 9, pages % 935-952, 1990. 
% % */ #include "MagickCore/studio.h" #include "MagickCore/cache.h" #include "MagickCore/color.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" /* Define declarations. */ #define MaxDimension 3 #define DeltaTau 0.5f #if defined(FastClassify) #define WeightingExponent 2.0 #define SegmentPower(ratio) (ratio) #else #define WeightingExponent 2.5 #define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0))); #endif #define Tau 5.2f /* Typedef declarations. */ typedef struct _ExtentPacket { double center; ssize_t index, left, right; } ExtentPacket; typedef struct _Cluster { struct _Cluster *next; ExtentPacket red, green, blue; ssize_t count, id; } Cluster; typedef struct _IntervalTree { double tau; ssize_t left, right; double mean_stability, stability; struct _IntervalTree *sibling, *child; } IntervalTree; typedef struct _ZeroCrossing { double tau, histogram[256]; short crossings[256]; } ZeroCrossing; /* Constant declarations. */ static const int Blue = 2, Green = 1, Red = 0, SafeMargin = 3, TreeLength = 600; /* Method prototypes. 
*/ static double OptimalTau(const ssize_t *,const double,const double,const double, const double,short *); static ssize_t DefineRegion(const short *,ExtentPacket *); static void InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *), ScaleSpace(const ssize_t *,const double,double *), ZeroCrossHistogram(double *,const double,short *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l a s s i f y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Classify() defines one or more classes. Each pixel is thresholded to % determine which class it belongs to. If the class is not identified it is % assigned to the closest class based on the fuzzy c-Means technique. % % The format of the Classify method is: % % MagickBooleanType Classify(Image *image,short **extrema, % const double cluster_threshold, % const double weighting_exponent, % const MagickBooleanType verbose,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. % % o cluster_threshold: This double represents the minimum number of % pixels contained in a hexahedra before it can be considered valid % (expressed as a percentage). % % o weighting_exponent: Specifies the membership weighting exponent. % % o verbose: A value greater than zero prints detailed information about % the identified classes. % % o exception: return any errors or warnings in this structure. 
%
*/

/*
  Classify() thresholds every pixel against the cluster extents found in the
  histogram analysis; pixels that match no cluster are assigned by fuzzy
  c-means membership.  On success the image is colormapped, one colormap
  entry per surviving cluster.  Returns MagickTrue on success; throws a
  binary exception (returning MagickFalse) on allocation failure or when
  more than 256 clusters are found.
*/
static MagickBooleanType Classify(Image *image,short **extrema,
  const double cluster_threshold,
  const double weighting_exponent,const MagickBooleanType verbose,
  ExceptionInfo *exception)
{
#define SegmentImageTag  "Segment/Image"

  CacheView
    *image_view;

  Cluster
    *cluster,
    *head,
    *last_cluster,
    *next_cluster;

  ExtentPacket
    blue,
    green,
    red;

  MagickOffsetType
    progress;

  double
    *free_squares;

  MagickStatusType
    status;

  register ssize_t
    i;

  register double
    *squares;

  size_t
    number_clusters;

  ssize_t
    count,
    y;

  /*
    Form clusters: one candidate cluster per (red, green, blue) extent triple.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) ResetMagickMemory(&red,0,sizeof(red));
  (void) ResetMagickMemory(&green,0,sizeof(green));
  (void) ResetMagickMemory(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster (first matching cluster wins).
  */
  status=MagickTrue;
  count=0;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=(double) ScaleQuantumToChar(
              GetPixelRed(image,p));
            cluster->green.center+=(double) ScaleQuantumToChar(
              GetPixelGreen(image,p));
            cluster->blue.center+=(double) ScaleQuantumToChar(
              GetPixelBlue(image,p));
            cluster->count++;
            break;
          }
      p+=GetPixelChannels(image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_Classify)
#endif
        proceed=SetImageProgress(image,SegmentImageTag,progress++,
          2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Remove clusters that do not meet minimum cluster threshold.
    NOTE(review): the threshold compares against the running `count` of
    surviving clusters, not the pixel total -- looks intentional but odd;
    confirm against the reference algorithm.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster: convert color sums into mean center values.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  number_clusters=(size_t) count;
  if (verbose != MagickFalse)
    {
      /*
        Print cluster statistics.
      */
      (void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n");
      (void) FormatLocaleFile(stdout,"===================\n\n");
      (void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double)
        cluster_threshold);
      (void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double)
        weighting_exponent);
      (void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n",
        (double) number_clusters);
      /*
        Print the total number of points per cluster.
      */
      (void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n");
      (void) FormatLocaleFile(stdout,"=============================\n\n");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        (void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double)
          cluster->id,(double) cluster->count);
      /*
        Print the cluster extents.
      */
      (void) FormatLocaleFile(stdout,
        "\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension);
      (void) FormatLocaleFile(stdout,"================");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
          cluster->id);
        (void) FormatLocaleFile(stdout,
          "%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double)
          cluster->red.left,(double) cluster->red.right,(double)
          cluster->green.left,(double) cluster->green.right,(double)
          cluster->blue.left,(double) cluster->blue.right);
      }
      /*
        Print the cluster center values.
      */
      (void) FormatLocaleFile(stdout,
        "\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension);
      (void) FormatLocaleFile(stdout,"=====================");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
          cluster->id);
        (void) FormatLocaleFile(stdout,"%g %g %g\n",(double)
          cluster->red.center,(double) cluster->green.center,(double)
          cluster->blue.center);
      }
      (void) FormatLocaleFile(stdout,"\n");
    }
  if (number_clusters > 256)
    ThrowBinaryException(ImageError,"TooManyClusters",image->filename);
  /*
    Speed up distance calculations: squares is biased by 255 so it can be
    indexed by a signed channel difference in [-255, 255].
  */
  squares=(double *) AcquireQuantumMemory(513UL,sizeof(*squares));
  if (squares == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  squares+=255;
  for (i=(-255); i <= 255; i++)
    squares[i]=(double) i*(double) i;
  /*
    Allocate image colormap, one entry per cluster center.
  */
  if (AcquireImageColormap(image,number_clusters,exception) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  i=0;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
  {
    image->colormap[i].red=(double) ScaleCharToQuantum((unsigned char)
      (cluster->red.center+0.5));
    image->colormap[i].green=(double) ScaleCharToQuantum((unsigned char)
      (cluster->green.center+0.5));
    image->colormap[i].blue=(double) ScaleCharToQuantum((unsigned char)
      (cluster->blue.center+0.5));
    i++;
  }
  /*
    Do coarse-grain classes: threshold pass, then fuzzy c-means fallback.
  */
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Cluster
      *cluster;

    register const PixelInfo
      *restrict p;

    register ssize_t
      x;

    register Quantum
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelIndex(image,0,q);
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Classify this pixel.
            */
            SetPixelIndex(image,(Quantum) cluster->id,q);
            break;
          }
      }
      if (cluster == (Cluster *) NULL)
        {
          double
            distance_squared,
            local_minima,
            numerator,
            ratio,
            sum;

          register ssize_t
            j,
            k;

          /*
            Compute fuzzy membership: assign the pixel to the colormap entry
            with the maximum membership value (1/sum).
          */
          local_minima=0.0;
          for (j=0; j < (ssize_t) image->colors; j++)
          {
            sum=0.0;
            p=image->colormap+j;
            distance_squared=squares[(ssize_t) ScaleQuantumToChar(
              GetPixelRed(image,q))-(ssize_t)
              ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[(ssize_t)
              ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t)
              ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[(ssize_t)
              ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t)
              ScaleQuantumToChar(ClampToQuantum(p->blue))];
            numerator=distance_squared;
            for (k=0; k < (ssize_t) image->colors; k++)
            {
              p=image->colormap+k;
              distance_squared=squares[(ssize_t) ScaleQuantumToChar(
                GetPixelRed(image,q))-(ssize_t)
                ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[
                (ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t)
                ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[
                (ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t)
                ScaleQuantumToChar(ClampToQuantum(p->blue))];
              ratio=numerator/distance_squared;
              sum+=SegmentPower(ratio);
            }
            if ((sum != 0.0) && ((1.0/sum) > local_minima))
              {
                /*
                  Classify this pixel.
                */
                local_minima=1.0/sum;
                SetPixelIndex(image,(Quantum) j,q);
              }
          }
        }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_Classify)
#endif
        proceed=SetImageProgress(image,SegmentImageTag,progress++,
          2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  status&=SyncImage(image,exception);
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  squares-=255;
  free_squares=squares;
  free_squares=(double *) RelinquishMagickMemory(free_squares);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n s o l i d a t e C r o s s i n g s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConsolidateCrossings() guarantees that an even number of zero crossings
%  always lie between two crossings.
%
%  The format of the ConsolidateCrossings method is:
%
%      ConsolidateCrossings(ZeroCrossing *zero_crossing,
%        const size_t number_crossings)
%
%  A description of each parameter follows.
%
%    o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
%    o number_crossings: This size_t specifies the number of elements
%      in the zero_crossing array.
%
*/

/* Absolute value for ssize_t (avoids abs()'s int-only domain). */
static inline ssize_t MagickAbsoluteValue(const ssize_t x)
{
  if (x < 0)
    return(-x);
  return(x);
}

/* Maximum of two ssize_t values. */
static inline ssize_t MagickMax(const ssize_t x,const ssize_t y)
{
  if (x > y)
    return(x);
  return(y);
}

/* Minimum of two ssize_t values. */
static inline ssize_t MagickMin(const ssize_t x,const ssize_t y)
{
  if (x < y)
    return(x);
  return(y);
}

static void ConsolidateCrossings(ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  register ssize_t
    i,
    j,
    k,
    l;

  ssize_t
    center,
    correct,
    count,
    left,
    right;

  /*
    Consolidate zero crossings: walk the scales coarse-to-fine, moving each
    crossing at scale i to the nearest compatible crossing at scale i+1.
    NOTE(review): indexing zero_crossing[i+1] assumes the caller allocated
    one extra element past number_crossings -- confirm at the call site.
  */
  for (i=(ssize_t) number_crossings-1; i >= 0; i--)
    for (j=0; j <= 255; j++)
    {
      if (zero_crossing[i].crossings[j] == 0)
        continue;
      /*
        Find the entry that is closest to j and still preserves the
        property that there are an even number of crossings between
        intervals.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      left=MagickMax(k,0);
      center=j;
      for (k=j+1; k < 255; k++)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      right=MagickMin(k,255);
      /*
        K is the zero crossing just left of j.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i].crossings[k] != 0)
          break;
      if (k < 0)
        k=0;
      /*
        Check center for an even number of crossings between k and j.
      */
      correct=(-1);
      if (zero_crossing[i+1].crossings[j] != 0)
        {
          count=0;
          for (l=k+1; l < center; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (center != k))
            correct=center;
        }
      /*
        Check left for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < left; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (left != k))
            correct=left;
        }
      /*
        Check right for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < right; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (right != k))
            correct=right;
        }
      l=(ssize_t) zero_crossing[i].crossings[j];
      zero_crossing[i].crossings[j]=0;
      if (correct != -1)
        zero_crossing[i].crossings[correct]=(short) l;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e f i n e R e g i o n                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DefineRegion() defines the left and right boundaries of a peak region.
%
%  The format of the DefineRegion method is:
%
%      ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
%
%  A description of each parameter follows.
%
%    o extrema:  Specifies a pointer to an array of integers.  They
%      represent the peaks and valleys of the histogram for each color
%      component.
%
%    o extents:  This pointer to an ExtentPacket represent the extends
%      of a particular peak or valley of a color component.
%
*/
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
  /*
    Initialize to default values.  Scanning resumes at extents->index, so
    successive calls enumerate successive regions.
  */
  extents->left=0;
  extents->center=0.0;
  extents->right=255;
  /*
    Find the left side (maxima).
  */
  for ( ; extents->index <= 255; extents->index++)
    if (extrema[extents->index] > 0)
      break;
  if (extents->index > 255)
    return(MagickFalse);  /* no left side - no region exists */
  extents->left=extents->index;
  /*
    Find the right side (minima).
  */
  for ( ; extents->index <= 255; extents->index++)
    if (extrema[extents->index] < 0)
      break;
  extents->right=extents->index-1;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e r i v a t i v e H i s t o g r a m                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DerivativeHistogram() determines the derivative of the histogram using
%  central differencing.
%
%  The format of the DerivativeHistogram method is:
%
%      DerivativeHistogram(const double *histogram,
%        double *derivative)
%
%  A description of each parameter follows.
%
%    o histogram: Specifies an array of doubles representing the number
%      of pixels for each intensity of a particular color component.
%
%    o derivative: This array of doubles is initialized by
%      DerivativeHistogram to the derivative of the histogram using central
%      differencing.
%
*/
static void DerivativeHistogram(const double *histogram,
  double *derivative)
{
  register ssize_t
    i,
    n;

  /*
    Compute endpoints using second order polynomial interpolation.
  */
  n=255;
  derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
  derivative[n]=(0.5*histogram[n-2]-2.0*histogram[n-1]+1.5*histogram[n]);
  /*
    Compute derivative using central differencing.
  */
  for (i=1; i < n; i++)
    derivative[i]=(histogram[i+1]-histogram[i-1])/2.0;
  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+  G e t I m a g e D y n a m i c T h r e s h o l d                            %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageDynamicThreshold() returns the dynamic threshold for an image.
% % The format of the GetImageDynamicThreshold method is: % % MagickBooleanType GetImageDynamicThreshold(const Image *image, % const double cluster_threshold,const double smooth_threshold, % PixelInfo *pixel,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o cluster_threshold: This double represents the minimum number of % pixels contained in a hexahedra before it can be considered valid % (expressed as a percentage). % % o smooth_threshold: the smoothing threshold eliminates noise in the second % derivative of the histogram. As the value is increased, you can expect a % smoother second derivative. % % o pixel: return the dynamic threshold here. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image, const double cluster_threshold,const double smooth_threshold, PixelInfo *pixel,ExceptionInfo *exception) { Cluster *background, *cluster, *object, *head, *last_cluster, *next_cluster; ExtentPacket blue, green, red; MagickBooleanType proceed; double threshold; register const Quantum *p; register ssize_t i, x; short *extrema[MaxDimension]; ssize_t count, *histogram[MaxDimension], y; /* Allocate histogram and extrema. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); GetPixelInfo(image,pixel); for (i=0; i < MaxDimension; i++) { histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram)); extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**histogram)); if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL)) { for (i-- ; i >= 0; i--) { extrema[i]=(short *) RelinquishMagickMemory(extrema[i]); histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]); } (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } } /* Initialize histogram. */ InitializeHistogram(image,histogram,exception); (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau, (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]); (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau, (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]); (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau, (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]); /* Form clusters. */ cluster=(Cluster *) NULL; head=(Cluster *) NULL; (void) ResetMagickMemory(&red,0,sizeof(red)); (void) ResetMagickMemory(&green,0,sizeof(green)); (void) ResetMagickMemory(&blue,0,sizeof(blue)); while (DefineRegion(extrema[Red],&red) != 0) { green.index=0; while (DefineRegion(extrema[Green],&green) != 0) { blue.index=0; while (DefineRegion(extrema[Blue],&blue) != 0) { /* Allocate a new class. 
*/ if (head != (Cluster *) NULL) { cluster->next=(Cluster *) AcquireMagickMemory( sizeof(*cluster->next)); cluster=cluster->next; } else { cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); head=cluster; } if (cluster == (Cluster *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); return(MagickFalse); } /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; } } } if (head == (Cluster *) NULL) { /* No classes were identified-- create one. */ cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); if (cluster == (Cluster *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; head=cluster; } /* Count the pixels for each cluster. */ count=0; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >= (cluster->red.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <= (cluster->red.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >= (cluster->green.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <= (cluster->green.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >= (cluster->blue.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <= (cluster->blue.right+SafeMargin))) { /* Count this pixel. 
*/ count++; cluster->red.center+=(double) ScaleQuantumToChar( GetPixelRed(image,p)); cluster->green.center+=(double) ScaleQuantumToChar( GetPixelGreen(image,p)); cluster->blue.center+=(double) ScaleQuantumToChar( GetPixelBlue(image,p)); cluster->count++; break; } p+=GetPixelChannels(image); } proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y, 2*image->rows); if (proceed == MagickFalse) break; } /* Remove clusters that do not meet minimum cluster threshold. */ count=0; last_cluster=head; next_cluster=head; for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; if ((cluster->count > 0) && (cluster->count >= (count*cluster_threshold/100.0))) { /* Initialize cluster. */ cluster->id=count; cluster->red.center/=cluster->count; cluster->green.center/=cluster->count; cluster->blue.center/=cluster->count; count++; last_cluster=cluster; continue; } /* Delete cluster. */ if (cluster == head) head=next_cluster; else last_cluster->next=next_cluster; cluster=(Cluster *) RelinquishMagickMemory(cluster); } object=head; background=head; if (count > 1) { object=head->next; for (cluster=object; cluster->next != (Cluster *) NULL; ) { if (cluster->count < object->count) object=cluster; cluster=cluster->next; } background=head->next; for (cluster=background; cluster->next != (Cluster *) NULL; ) { if (cluster->count > background->count) background=cluster; cluster=cluster->next; } } if (background != (Cluster *) NULL) { threshold=(background->red.center+object->red.center)/2.0; pixel->red=(double) ScaleCharToQuantum((unsigned char) (threshold+0.5)); threshold=(background->green.center+object->green.center)/2.0; pixel->green=(double) ScaleCharToQuantum((unsigned char) (threshold+0.5)); threshold=(background->blue.center+object->blue.center)/2.0; pixel->blue=(double) ScaleCharToQuantum((unsigned char) (threshold+0.5)); } /* Relinquish resources. 
*/ for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; cluster=(Cluster *) RelinquishMagickMemory(cluster); } for (i=0; i < MaxDimension; i++) { extrema[i]=(short *) RelinquishMagickMemory(extrema[i]); histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + I n i t i a l i z e H i s t o g r a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InitializeHistogram() computes the histogram for an image. % % The format of the InitializeHistogram method is: % % InitializeHistogram(const Image *image,ssize_t **histogram) % % A description of each parameter follows. % % o image: Specifies a pointer to an Image structure; returned from % ReadImage. % % o histogram: Specifies an array of integers representing the number % of pixels for each intensity of a particular color component. % */ static void InitializeHistogram(const Image *image,ssize_t **histogram, ExceptionInfo *exception) { register const Quantum *p; register ssize_t i, x; ssize_t y; /* Initialize histogram. 
*/ for (i=0; i <= 255; i++) { histogram[Red][i]=0; histogram[Green][i]=0; histogram[Blue][i]=0; } for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(image,p))]++; histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p))]++; histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p))]++; p+=GetPixelChannels(image); } } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + I n i t i a l i z e I n t e r v a l T r e e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InitializeIntervalTree() initializes an interval tree from the lists of % zero crossings. % % The format of the InitializeIntervalTree method is: % % InitializeIntervalTree(IntervalTree **list,ssize_t *number_nodes, % IntervalTree *node) % % A description of each parameter follows. % % o zero_crossing: Specifies an array of structures of type ZeroCrossing. % % o number_crossings: This size_t specifies the number of elements % in the zero_crossing array. 
%
*/

/*
  InitializeList(): collect (in list) every leaf node -- a node with no
  children -- of the interval tree rooted at node, via pre-order recursion.
*/
static void InitializeList(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->child == (IntervalTree *) NULL)
    list[(*number_nodes)++]=node;
  InitializeList(list,number_nodes,node->sibling);
  InitializeList(list,number_nodes,node->child);
}

/*
  MeanStability(): for each node, store the mean of its children's stability
  values in node->mean_stability (0.0 for childless nodes); recurses over the
  whole sibling/child tree.
*/
static void MeanStability(IntervalTree *node)
{
  register IntervalTree
    *child;

  if (node == (IntervalTree *) NULL)
    return;
  node->mean_stability=0.0;
  child=node->child;
  if (child != (IntervalTree *) NULL)
    {
      register ssize_t
        count;

      register double
        sum;

      sum=0.0;
      count=0;
      for ( ; child != (IntervalTree *) NULL; child=child->sibling)
      {
        sum+=child->stability;
        count++;
      }
      node->mean_stability=sum/(double) count;
    }
  MeanStability(node->sibling);
  MeanStability(node->child);
}

/*
  Stability(): a node's stability is the tau difference between the node and
  its (first) child -- i.e. how long the interval persists in scale space.
*/
static void Stability(IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->child == (IntervalTree *) NULL)
    node->stability=0.0;
  else
    node->stability=node->tau-(node->child)->tau;
  Stability(node->sibling);
  Stability(node->child);
}

static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  IntervalTree
    *head,
    **list,
    *node,
    *root;

  register ssize_t
    i;

  ssize_t
    j,
    k,
    left,
    number_nodes;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return((IntervalTree *) NULL);
  /*
    The root is the entire histogram.
  */
  /*
    NOTE(review): AcquireMagickMemory() results (root and the per-interval
    nodes below) are dereferenced without NULL checks -- potential NULL
    dereference on allocation failure.
  */
  root=(IntervalTree *) AcquireMagickMemory(sizeof(*root));
  root->child=(IntervalTree *) NULL;
  root->sibling=(IntervalTree *) NULL;
  root->tau=0.0;
  root->left=0;
  root->right=255;
  /* i starts at -1 so the first pass (i+1 == 0) uses the coarsest entry. */
  for (i=(-1); i < (ssize_t) number_crossings; i++)
  {
    /*
      Initialize list with all nodes with no children.
    */
    number_nodes=0;
    InitializeList(list,&number_nodes,root);
    /*
      Split list: each leaf interval is subdivided at the zero crossings of
      the next (finer) scale; the pieces become its children (siblings of
      each other).
    */
    for (j=0; j < number_nodes; j++)
    {
      head=list[j];
      left=head->left;
      node=head;
      for (k=head->left+1; k < head->right; k++)
      {
        if (zero_crossing[i+1].crossings[k] != 0)
          {
            if (node == head)
              {
                node->child=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->child));
                node=node->child;
              }
            else
              {
                node->sibling=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->sibling));
                node=node->sibling;
              }
            node->tau=zero_crossing[i+1].tau;
            node->child=(IntervalTree *) NULL;
            node->sibling=(IntervalTree *) NULL;
            node->left=left;
            node->right=k;
            left=k;
          }
      }
      /* Trailing piece: interval from the last crossing to head->right. */
      if (left != head->left)
        {
          node->sibling=(IntervalTree *) AcquireMagickMemory(
            sizeof(*node->sibling));
          node=node->sibling;
          node->tau=zero_crossing[i+1].tau;
          node->child=(IntervalTree *) NULL;
          node->sibling=(IntervalTree *) NULL;
          node->left=left;
          node->right=head->right;
        }
    }
  }
  /*
    Determine the stability: difference between a nodes tau and its child.
  */
  Stability(root->child);
  MeanStability(root->child);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(root);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   O p t i m a l T a u                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OptimalTau() finds the optimal tau for each band of the histogram.
%
%  The format of the OptimalTau method is:
%
%      double OptimalTau(const ssize_t *histogram,const double max_tau,
%        const double min_tau,const double delta_tau,
%        const double smooth_threshold,short *extrema)
%
%  A description of each parameter follows.
%
%    o histogram: Specifies an array of integers representing the number
%      of pixels for each intensity of a particular color component.
%
%    o extrema:  Specifies a pointer to an array of integers.  They
%      represent the peaks and valleys of the histogram for each color
%      component.
%
*/

/*
  ActiveNodes(): collect the "active" nodes of the interval tree -- nodes
  whose stability is >= the mean stability of their children.  When a node is
  active its subtree is pruned (only siblings are followed); otherwise the
  search descends into the children.
*/
static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->stability >= node->mean_stability)
    {
      list[(*number_nodes)++]=node;
      ActiveNodes(list,number_nodes,node->sibling);
    }
  else
    {
      ActiveNodes(list,number_nodes,node->sibling);
      ActiveNodes(list,number_nodes,node->child);
    }
}

/*
  FreeNodes(): post-order release of an entire interval tree.
*/
static void FreeNodes(IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  FreeNodes(node->sibling);
  FreeNodes(node->child);
  node=(IntervalTree *) RelinquishMagickMemory(node);
}

static double OptimalTau(const ssize_t *histogram,const double max_tau,
  const double min_tau,const double delta_tau,const double smooth_threshold,
  short *extrema)
{
  IntervalTree
    **list,
    *node,
    *root;

  MagickBooleanType
    peak;

  double
    average_tau,
    *derivative,
    *second_derivative,
    tau,
    value;

  register ssize_t
    i,
    x;

  size_t
    count,
    number_crossings;

  ssize_t
    index,
    j,
    k,
    number_nodes;

  ZeroCrossing
    *zero_crossing;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return(0.0);
  /*
    Allocate zero crossing list.
  */
  count=(size_t) ((max_tau-min_tau)/delta_tau)+2;
  zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count,
    sizeof(*zero_crossing));
  if (zero_crossing == (ZeroCrossing *) NULL)
    return(0.0);  /* NOTE(review): list is leaked on this early return */
  for (i=0; i < (ssize_t) count; i++)
    zero_crossing[i].tau=(-1.0);
  /*
    Initialize zero crossing list: for each tau from coarse to fine, smooth
    the histogram, take two derivatives, and record the sign changes of the
    second derivative.
  */
  derivative=(double *) AcquireQuantumMemory(256,sizeof(*derivative));
  second_derivative=(double *) AcquireQuantumMemory(256,
    sizeof(*second_derivative));
  if ((derivative == (double *) NULL) ||
      (second_derivative == (double *) NULL))
    ThrowFatalException(ResourceLimitFatalError,
      "UnableToAllocateDerivatives");
  i=0;
  for (tau=max_tau; tau >= min_tau; tau-=delta_tau)
  {
    zero_crossing[i].tau=tau;
    ScaleSpace(histogram,tau,zero_crossing[i].histogram);
    DerivativeHistogram(zero_crossing[i].histogram,derivative);
    DerivativeHistogram(derivative,second_derivative);
    ZeroCrossHistogram(second_derivative,smooth_threshold,
      zero_crossing[i].crossings);
    i++;
  }
  /*
    Add an entry for the original histogram.
  */
  zero_crossing[i].tau=0.0;
  for (j=0; j <= 255; j++)
    zero_crossing[i].histogram[j]=(double) histogram[j];
  DerivativeHistogram(zero_crossing[i].histogram,derivative);
  DerivativeHistogram(derivative,second_derivative);
  ZeroCrossHistogram(second_derivative,smooth_threshold,
    zero_crossing[i].crossings);
  number_crossings=(size_t) i;
  derivative=(double *) RelinquishMagickMemory(derivative);
  second_derivative=(double *) RelinquishMagickMemory(second_derivative);
  /*
    Ensure the scale-space fingerprints form lines in scale-space, not loops.
  */
  ConsolidateCrossings(zero_crossing,number_crossings);
  /*
    Force endpoints to be included in the interval.
  */
  for (i=0; i <= (ssize_t) number_crossings; i++)
  {
    for (j=0; j < 255; j++)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]);
    for (j=255; j > 0; j--)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]);
  }
  /*
    Initialize interval tree.
  */
  root=InitializeIntervalTree(zero_crossing,number_crossings);
  if (root == (IntervalTree *) NULL)
    return(0.0);  /* NOTE(review): list and zero_crossing leaked here */
  /*
    Find active nodes:  stability is greater (or equal) to the mean stability
    of its children.
  */
  number_nodes=0;
  ActiveNodes(list,&number_nodes,root->child);
  /*
    Initialize extrema.
  */
  for (i=0; i <= 255; i++)
    extrema[i]=0;
  for (i=0; i < number_nodes; i++)
  {
    /*
      Find this tau in zero crossings list.
    */
    k=0;
    node=list[i];
    for (j=0; j <= (ssize_t) number_crossings; j++)
      if (zero_crossing[j].tau == node->tau)
        k=j;
    /*
      Find the value of the peak: a -1 crossing at the right edge marks a
      maximum; otherwise the interval holds a minimum.
    */
    peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue :
      MagickFalse;
    index=node->left;
    value=zero_crossing[k].histogram[index];
    for (x=node->left; x <= node->right; x++)
    {
      if (peak != MagickFalse)
        {
          if (zero_crossing[k].histogram[x] > value)
            {
              value=zero_crossing[k].histogram[x];
              index=x;
            }
        }
      else
        if (zero_crossing[k].histogram[x] < value)
          {
            value=zero_crossing[k].histogram[x];
            index=x;
          }
    }
    /* Mark the whole interval: +index for peaks, -index for valleys;
       index 0 is encoded as 256 so the sign remains meaningful. */
    for (x=node->left; x <= node->right; x++)
    {
      if (index == 0)
        index=256;
      if (peak != MagickFalse)
        extrema[x]=(short) index;
      else
        extrema[x]=(short) (-index);
    }
  }
  /*
    Determine the average tau.
    NOTE(review): if number_nodes == 0 this divides by zero -- confirm whether
    ActiveNodes() can return an empty list for degenerate histograms.
  */
  average_tau=0.0;
  for (i=0; i < number_nodes; i++)
    average_tau+=list[i]->tau;
  average_tau/=(double) number_nodes;
  /*
    Relinquish resources.
  */
  FreeNodes(root);
  zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(average_tau);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S c a l e S p a c e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleSpace() performs a scale-space filter on the 1D histogram.
%
%  The format of the ScaleSpace method is:
%
%      ScaleSpace(const ssize_t *histogram,const double tau,
%        double *scale_histogram)
%
%  A description of each parameter follows.
%
%    o histogram: Specifies an array of doubles representing the number
%      of pixels for each intensity of a particular color component.
%
*/
static void ScaleSpace(const ssize_t *histogram,const double tau,
  double *scale_histogram)
{
  double
    alpha,
    beta,
    *gamma,
    sum;

  register ssize_t
    u,
    x;

  /*
    Convolve the histogram with a Gaussian of standard deviation tau.
  */
  gamma=(double *) AcquireQuantumMemory(256,sizeof(*gamma));
  if (gamma == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,
      "UnableToAllocateGammaMap");
  alpha=PerceptibleReciprocal(tau*sqrt(2.0*MagickPI));
  beta=(-1.0*PerceptibleReciprocal(2.0*tau*tau));
  /* Pre-zero the kernel: the fill loop below breaks early once the kernel
     value drops under MagickEpsilon, leaving the tail at 0. */
  for (x=0; x <= 255; x++)
    gamma[x]=0.0;
  for (x=0; x <= 255; x++)
  {
    gamma[x]=exp((double) beta*x*x);
    if (gamma[x] < MagickEpsilon)
      break;
  }
  for (x=0; x <= 255; x++)
  {
    sum=0.0;
    for (u=0; u <= 255; u++)
      sum+=(double) histogram[u]*gamma[MagickAbsoluteValue(x-u)];
    scale_histogram[x]=alpha*sum;
  }
  gamma=(double *) RelinquishMagickMemory(gamma);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  S e g m e n t I m a g e                                                    %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SegmentImage() segment an image by analyzing the histograms of the color
%  components and identifying units that are homogeneous with the fuzzy
%  C-means technique.
%
%  The format of the SegmentImage method is:
%
%      MagickBooleanType SegmentImage(Image *image,
%        const ColorspaceType colorspace,const MagickBooleanType verbose,
%        const double cluster_threshold,const double smooth_threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o colorspace: Indicate the colorspace.
%
%    o verbose:  Set to MagickTrue to print detailed information about the
%      identified classes.
%
%    o cluster_threshold:  This represents the minimum number of pixels
%      contained in a hexahedra before it can be considered valid (expressed
%      as a percentage).
%
%    o smooth_threshold: the smoothing threshold eliminates noise in the second
%      derivative of the histogram.  As the value is increased, you can expect a
%      smoother second derivative.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SegmentImage(Image *image,
  const ColorspaceType colorspace,const MagickBooleanType verbose,
  const double cluster_threshold,const double smooth_threshold,
  ExceptionInfo *exception)
{
  ColorspaceType
    previous_colorspace;

  MagickBooleanType
    status;

  register ssize_t
    i;

  short
    *extrema[MaxDimension];

  ssize_t
    *histogram[MaxDimension];

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /* Unwind the channels already allocated before bailing out. */
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename)
      }
  }
  /*
    Initialize histogram.
  */
  previous_colorspace=image->colorspace;
  (void) TransformImageColorspace(image,colorspace,exception);
  InitializeHistogram(image,histogram,exception);
  /* smooth_threshold == 0 means "no smoothing requested"; fall back to 1.0. */
  (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]);
  /*
    Classify using the fuzzy c-Means technique.
  */
  status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose,
    exception);
  /* Restore the caller's colorspace before returning. */
  (void) TransformImageColorspace(image,previous_colorspace,exception);
  /*
    Relinquish resources.
  */
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Z e r o C r o s s H i s t o g r a m                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroCrossHistogram() find the zero crossings in a histogram and marks
%  directions as:  1 is negative to positive; 0 is zero crossing; and -1
%  is positive to negative.
%
%  The format of the ZeroCrossHistogram method is:
%
%      ZeroCrossHistogram(double *second_derivative,
%        const double smooth_threshold,short *crossings)
%
%  A description of each parameter follows.
%
%    o second_derivative: Specifies an array of doubles representing the
%      second derivative of the histogram of a particular color component.
%
%    o crossings:  This array of integers is initialized with
%      -1, 0, or 1 representing the slope of the first derivative of the
%      of a particular color component.
%
*/
static void ZeroCrossHistogram(double *second_derivative,
  const double smooth_threshold,short *crossings)
{
  register ssize_t
    i;

  ssize_t
    parity;

  /*
    Merge low numbers to zero to help prevent noise.
  */
  for (i=0; i <= 255; i++)
    if ((second_derivative[i] < smooth_threshold) &&
        (second_derivative[i] >= -smooth_threshold))
      second_derivative[i]=0.0;
  /*
    Mark zero crossings: parity tracks the sign of the last nonzero sample,
    so a sign flip produces a +/-1 marker at the crossing bin.
  */
  parity=0;
  for (i=0; i <= 255; i++)
  {
    crossings[i]=0;
    if (second_derivative[i] < 0.0)
      {
        if (parity > 0)
          crossings[i]=(-1);
        parity=1;
      }
    else
      if (second_derivative[i] > 0.0)
        {
          if (parity < 0)
            crossings[i]=1;
          parity=(-1);
        }
  }
}
/* ===================== mm_funcs.c ===================== */
/*
 * Rectangular matrix multiplication, started from MIT Cilk matmul.cilk example
 *
 */
#include "benchmark.h"
//#include "mkl.h"
#include "mkl_types.h"
#include "mkl_cblas.h"

/* Set every element of the n x n matrix A to zero. */
void zero(REAL *A, int n)
{
    int i, j;
    //#pragma omp for private (i, j)
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            A[i * n + j] = 0.0;
        }
    }
}

/* Fill the n x n matrix A with uniform random values from drand48(). */
void init(REAL *A, int n)
{
    int i, j;
    //#pragma omp for private (i, j)
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            A[i * n + j] = (double)drand48();
        }
    }
}

/*
 * Maximum element-wise relative error between the n x n matrices A and B,
 * with A as the reference.  NOTE(review): divides by A[i][j]; a zero entry
 * in A yields inf/NaN -- presumably safe because init() makes zeros
 * improbable, but confirm for other inputs.
 */
double maxerror(REAL *A, REAL *B, int n)
{
    int i, j;
    double error = 0.0;

    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            double diff = (A[i * n + j] - B[i * n + j]) / A[i * n + j];
            if (diff < 0)
                diff = -diff;
            if (diff > error)
                error = diff;
        }
    }
    return error;
}

#ifdef USING_PAPI
/*
 * Abort with a message when a PAPI call did not return PAPI_OK.
 * NOTE(review): these three helpers are declared with an implicit int
 * return type ("inline papi_wrap(...)"), which is invalid in C99 and
 * later; they should carry explicit return types (void / int / void).
 */
inline papi_wrap(int error, char name[])
{
    if(error != PAPI_OK) {
        printf("Error: %s\n", name);
        printf("  %s\n", PAPI_strerror(error));
        exit(1);
    }
}

/*
 * Per-thread PAPI setup: register the thread, clear its counter cache
 * slots, and reset the global event set.  Returns the OMP thread id.
 */
inline papi_threaded_setup()
{
    int ii;
    int num_threads = omp_get_num_threads();
    int thread_id = omp_get_thread_num();
    PAPI_register_thread();
    extern int global_eventSet[1];
    extern long long int cache[48][4];
    // thread_bind(thread_id);
    for(ii = 0; ii < 4; ii++)
        cache[thread_id][ii] = 0;
    PAPI_reset(global_eventSet[0]);
    return thread_id;
}

/*
 * Per-thread PAPI teardown: read the counters into the thread's cache row,
 * unregister the thread, and print the four counter values.
 * NOTE(review): unlike papi_threaded_setup(), this function has no local
 * extern declarations for global_eventSet/cache -- they must come from a
 * header (presumably benchmark.h); verify.
 */
inline papi_threaded_teardown(int id)
{
    PAPI_read(global_eventSet[0], &cache[id][0]);
    PAPI_unregister_thread();
    printf("%-4d %-16lld %-16lld %-16lld %-16lld\n", id, cache[id][0],
           cache[id][1], cache[id][2], cache[id][3]);
    // PAPI_read(global_eventSet[0], cache[thread_id]);
    // PAPI_unregister_thread();
    // printf("%-4d %-6d %-6d\n", thread_id,
    //        100 * (int)( ( cache[thread_id][0] - cache[thread_id][1]) / cache[thread_id][0]),
    //        100 * (int)( ( cache[thread_id][2] - cache[thread_id][3]) / cache[thread_id][2]));
    //
}
#endif

/*
 * Reference implementation via MKL: C += A * B^T (B is stored so that row k
 * holds column k, hence CblasTrans).  m, p and bw mirror the signature of
 * the other kernels; alpha = beta = 1.
 */
void mkl_call(REAL *A, REAL *B, REAL *C, int m, int n, int p, int bw)
{
    double alpha = 1, beta = 1;
    cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasTrans, m, n, p, alpha,
                A, p, B, n, beta, C, n);
}

/*
 * Naive triple-loop multiply, B accessed transposed (row k of B is column k).
 * NOTE(review): m, p and bw are ignored -- all three dimensions use n, so
 * this only handles square matrices despite the "rectangular" signature.
 */
void
iter_matmul(REAL *A, REAL *B, REAL *C, int m, int n, int p, int bw)
{
    int i, j, k;
    for (i = 0; i < n; i++)
        for (k = 0; k < n; k++) {
            REAL c = 0.0;
            for (j = 0; j < n; j++)
                c += A[i * n + j] * B[k * n + j];
            C[i * n + k] = c;
        }
}

/* Same computation as iter_matmul, with the outer loop split across an
   OpenMP parallel-for.  m, p and bw are likewise ignored. */
void omp_matmul(REAL *A, REAL *B, REAL *C, int m, int n, int p, int bw)
{
#pragma omp parallel shared(A, B, C, n)
    {
        int i, j, k;
#pragma omp for private(i,j,k)
        for (i = 0; i < n; i++) {
            for (k = 0; k < n; k++) {
                REAL c = 0.0;
                for (j = 0; j < n; j++)
                    c += A[i * n + j] * B[k * n + j];
                C[i * n + k] = c;
            }
        }
    }
}

/*
 * A \in M(m, n)
 * B \in M(n, p)
 * C \in M(m, p)
 *
 * Recursive divide-and-conquer multiply using OpenMP tasks; ld is the
 * leading dimension shared by all three matrices, and add selects
 * accumulate (+=) vs overwrite (=) in the base case.
 */
void matmul_omp_task(REAL *A, REAL *B, REAL *C, int m, int n, int p,
                     int ld, int add)
{
    if ((m + n + p) <= 64) {
        int i, j, k;
        /* base case */
        if (add) {
            for (i = 0; i < m; i++)
                for (k = 0; k < p; k++) {
                    REAL c = 0.0;
                    for (j = 0; j < n; j++)
                        c += A[i * ld + j] * B[k * ld + j];
                    C[i * ld + k] += c;
                }
        } else {
            for (i = 0; i < m; i++)
                for (k = 0; k < p; k++) {
                    REAL c = 0.0;
                    for (j = 0; j < n; j++)
                        c += A[i * ld + j] * B[k * ld + j];
                    C[i * ld + k] = c;
                }
        }
    } else if (m >= n && n >= p) {
        /* Split the rows of A/C; the halves touch disjoint parts of C,
           so both tasks may run concurrently. */
        int m1 = m >> 1;
#pragma omp task
        matmul_omp_task(A, B, C, m1, n, p, ld, add);
#pragma omp task
        matmul_omp_task(A + m1 * ld, B, C + m1 * ld, m - m1, n, p, ld, add);
    } else if (n >= m && n >= p) {
        /* Split the inner dimension: both halves update the same C region,
           so the taskwait orders the second (accumulating) half after the
           first. */
        int n1 = n >> 1;
#pragma omp task
        matmul_omp_task(A, B, C, m, n1, p, ld, add);
#pragma omp taskwait
#pragma omp task
        matmul_omp_task(A + n1, B + n1, C, m, n - n1, p, ld, 1);
    } else {
        /* Split the columns of B/C; disjoint C regions again. */
        int p1 = p >> 1;
#pragma omp task
        matmul_omp_task(A, B, C, m, n, p1, ld, add);
#pragma omp task
        matmul_omp_task(A, B + p1 * ld, C + p1, m, n, p - p1, ld, add);
    }
#pragma omp taskwait
}

/* Entry point for the task version: a single thread seeds the recursion
   inside a parallel region (n x n problem, overwrite mode). */
void matmul_omp_task_caller(REAL *A, REAL *B, REAL *C, int m, int n, int p,
                            int bw)
{
#pragma omp parallel shared(A, B, C, n)
    {
#pragma omp single
        {
            matmul_omp_task(A, B, C, n, n, n, n, 0);
        }
    }
}

/* Sequential version of the same divide-and-conquer recursion (no tasks). */
void matmul_recursive_tile(REAL *A, REAL *B, REAL *C, int m, int n, int p,
                           int ld, int add)
{
    if ((m + n + p) <= 64) {
        int i, j, k;
        /* base case */
        if (add) {
            for (i = 0; i < m; i++)
                for (k = 0; k < p; k++) {
                    REAL c = 0.0;
                    for (j = 0; j < n; j++)
                        c += A[i * ld + j] * B[k * ld + j];
                    C[i * ld + k] += c;
                }
        } else {
            for (i = 0; i < m; i++)
                for (k = 0; k < p; k++) {
                    REAL c = 0.0;
                    for (j = 0; j < n; j++)
                        c += A[i * ld + j] * B[k * ld + j];
                    C[i * ld + k] = c;
                }
        }
    } else if (m >= n && n >= p) {
        int m1 = m >> 1;
        matmul_recursive_tile(A, B, C, m1, n, p, ld, add);
        matmul_recursive_tile(A + m1 * ld, B, C + m1 * ld, m - m1, n, p, ld, add);
    } else if (n >= m && n >= p) {
        int n1 = n >> 1;
        matmul_recursive_tile(A, B, C, m, n1, p, ld, add);
        matmul_recursive_tile(A + n1, B + n1, C, m, n - n1, p, ld, 1);
    } else {
        int p1 = p >> 1;
        matmul_recursive_tile(A, B, C, m, n, p1, ld, add);
        matmul_recursive_tile(A, B + p1 * ld, C + p1, m, n, p - p1, ld, add);
    }
}

/* Entry point for the sequential recursive version (n x n, overwrite mode;
   the ld parameter is ignored -- n is used as the leading dimension). */
void matmul_recursive_tile_caller(REAL *A, REAL *B, REAL *C, int m, int n,
                                  int p, int ld)
{
    matmul_recursive_tile(A, B, C, m, n, p, n, 0);
}

/*
  Input: A(m,n), B(n, p), the matrix array.
         bwidth, the small block size(height=width).
  output: C(m,p)
  Main function: multiplication for small blocks
*/
void sequential_tiling_smallblockMul(REAL *A, REAL *B, REAL *C, int m, int n,
                                     int p, int bwidth)
{
    int i, j, k;
    //small block multiplication (accumulates into C; B accessed transposed)
    for (i = 0; i < bwidth; i++ )
        for (j = 0; j < bwidth; j++)
            for (k = 0; k < bwidth; k ++)
                *(C + i * p + j) = *(C + i * p + j) +
                    (*(A + i * n + k)) * (*(B + j * n + k));
}

/*
  Input: A(m,n), B(n, p), the matrix array.
         bwidth, the small block size(height=width).
  output: C(m,p)
  Main Function: split the huge matrix into small blocks.
*/ void sequential_tiling(REAL *A, REAL *B, REAL *C, int m, int n, int p, int bwidth) { int i, j, k; for (i = 0; i < m; i += bwidth) { for (j = 0; j < p; j += bwidth) { for (k = 0; k < n; k += bwidth) { //Get the current block start addr REAL* pStartA = A + i * n + k; REAL* pStartB = B + j * n + k; REAL* pStartC = C + i * p + j; sequential_tiling_smallblockMul(pStartA, pStartB, pStartC, m, n, p, bwidth); } } } } void omp_dtile(REAL *A, REAL *B, REAL *C, int m, int n, int p, int bwidth) { #pragma omp parallel shared(A, B, C, n) { int num_shcache = 1;//8; /* 8 for crill, and 4 for Hopper (Cray XE6) */ int num_cores_per_shcache = 6; int num_cores = num_shcache * num_cores_per_shcache; int global_id = omp_get_thread_num(); int num_threads = omp_get_num_threads(); //thread_bind(global_id); int master_id = global_id / num_shcache; int local_id = global_id % num_shcache; int M_sub_size = n/num_threads; int M_remain = n % num_threads; /* * A is row-based evenly decomposed * if not evenly distributed, some core gets one more starting from 0 */ int M_start = global_id * M_sub_size; if (global_id < M_remain) { M_start += global_id; M_sub_size ++; } else M_start += M_remain; /* the real work */ int i, j, k; long long int cache[4] = {0,0,0,0}; long long int first[4] = {0,0,0,0}; //PAPI_register_thread(); for (i=0; i<M_sub_size; i++) for (k=0; k<n; k++) { //REAL tmp = 0.0; int ii = M_start + i; REAL tmp = A[ii * n] * B[k*n]; // PAPI_accum(EventSet,first); for (j=1; j<n; j++) tmp += A[ii * n + j] * B[k*n + j]; C[ii*n+k] = tmp; } //PAPI_accum(EventSet, cache); //printf("RT:thread %d, cache: %lld (%lld miss), L3: %lld (%lld miss)\n",global_id, cache[0],cache[1],cache[2],cache[3]); } } void omp_tile_rec(REAL *A, REAL *B, REAL *C, int m, int n, int p, int bwidth) { #pragma omp parallel shared(A, B, C, n) { //long long int cache[4] = {0,0,0,0}; //long long int first[4] = {0,0,0,0}; //PAPI_register_thread(); int num_shcache = 4; /* 8 for crill, and 4 for Hopper (Cray XE6) */ int 
num_cores_per_shcache = 6; int num_cores = num_shcache * num_cores_per_shcache; int global_id = omp_get_thread_num(); int num_threads = omp_get_num_threads(); //thread_bind(global_id); int master_id = global_id / num_shcache; int local_id = global_id % num_shcache; int M_sub_size = n/num_threads; int M_remain = n % num_threads; /* * A is row-based evenly decomposed * if not evenly distributed, some core gets one more starting from 0 */ int M_start = global_id * M_sub_size; if (global_id < M_remain) { M_start += global_id; M_sub_size ++; } else M_start += M_remain; //PAPI_accum(EventSet, cache); matmul_recursive_tile(A+M_start*n, B, C+M_start*n, M_sub_size, n, n, n, 0); //PAPI_accum(EventSet, cache); //printf("DRT:thread %d, cache: %lld (%lld miss), L3: %lld (%lld miss)\n",global_id, cache[0],cache[1],cache[2],cache[3]); //printf("thread %d to work:\tA_sub row based (start:size): %d:%d\n", global_id, M_start, M_sub_size); } } #ifdef USING_PAPI #ifndef NO_NATIVE PAPI_event_info_t* getEnergyEventInfo() { int numcmp = PAPI_num_components(); int retval, i,cid, j = 0,enum_modifier; const PAPI_component_info_t *component; PAPI_event_info_t info; PAPI_event_info_t *desiredInfo; desiredInfo = (PAPI_event_info_t *) calloc(3, sizeof(PAPI_event_info_t)); unsigned int native = 0x0; for( cid = 0; cid < numcmp; cid++) { i = 0 | PAPI_NATIVE_MASK; component=PAPI_get_component_info(cid); if (component->disabled) continue; native = PAPI_NATIVE_MASK | i; retval=PAPI_enum_cmp_event( &i, PAPI_ENUM_FIRST, cid ); do { memset( &info, 0, sizeof ( info ) ); retval = PAPI_get_event_info( i, &info ); if ( retval != PAPI_OK ) continue; if( strcmp(info.symbol, "rapl:::PACKAGE_ENERGY:PACKAGE0") == 0 ) { desiredInfo[0] = info; //printf("%d,%d - %s\n", cid, i, info.symbol); //printf("%d,%d - %s\n", cid, i, desiredInfo[0].symbol); } if( retval = strcmp(info.symbol, "rapl:::DRAM_ENERGY:PACKAGE0") == 0 ) { desiredInfo[1] = info; //printf("%d,%d - %s\n", cid, i, info.symbol); //printf("%d,%d - %s\n", 
cid, i, desiredInfo[1].symbol); } if( strcmp(info.symbol, "rapl:::PP0_ENERGY:PACKAGE0") == 0) { desiredInfo[2] = info; //printf("%d,%d - %s\n", cid, i, info.symbol); //printf("%d,%d - %s\n", cid, i, desiredInfo[2].symbol); } } while (PAPI_enum_cmp_event( &i, enum_modifier, cid ) == PAPI_OK ); } return desiredInfo; } #endif PAPI_event_info_t* startPAPI(int *eventSet, int *nativeEventSet, unsigned int testEvents[], int numE) { int retVal = 0; int i; int ret; PAPI_event_info_t *infoEnergy; papi_wrap((ret = PAPI_library_init( PAPI_VER_CURRENT) < 0) ? ret : (ret == PAPI_VER_CURRENT ? PAPI_OK : ret), "PAPI library version mismatch"); papi_wrap( PAPI_thread_init( (unsigned long (*)(void)) omp_get_thread_num), "PAPI thread initialization error"); papi_wrap( PAPI_create_eventset(eventSet), "Error creating eventset"); #ifndef NO_NATIVE papi_wrap( PAPI_create_eventset(nativeEventSet), "Error creating the native eventset"); infoEnergy = getEnergyEventInfo(); for(i=0;i<3;i++) if(infoEnergy[i].event_code > 0) papi_wrap( PAPI_add_event(*nativeEventSet, infoEnergy[i].event_code), "PAPI error adding native event"); papi_wrap(PAPI_start(*nativeEventSet), "Error starting the Native events"); #endif for(i=0; i < numE; i++) papi_wrap( PAPI_add_event(*eventSet, testEvents[i]), "PAPI error adding preset event"); papi_wrap(PAPI_start(*eventSet), "Error starting the event set"); return infoEnergy; } #endif /* void start_papi(int *event_set, int *native_event_set, unsigned int test_events[], unsigned int native_events[]) { int retval = PAPI_library_init(PAPI_VER_CURRENT); if (retval != PAPI_VER_CURRENT && retval > 0) handle_papi_error("PAPI library version mismatch!\n"); if (retval < 0) handle_papi_error("PAPI library version init failure.\n"); if (PAPI_thread_init((long unsigned int (*)(void))omp_get_thread_num) != PAPI_OK) handle_papi_error("PAPI thread init.\n"); if (PAPI_create_eventset(event_set) != PAPI_OK) handle_papi_error("error creating eventset\n"); if (PAPI_add_event(*event_set, 
PAPI_L2_TCA) != PAPI_OK) handle_papi_error("error adding event 1\n"); if (PAPI_add_event(*event_set, PAPI_L2_TCM) != PAPI_OK) handle_papi_error("error adding event 2\n"); if (PAPI_add_event(*event_set, PAPI_L3_TCA) != PAPI_OK) handle_papi_error("error adding event 3\n"); if (PAPI_add_event(*event_set, PAPI_L3_TCM) != PAPI_OK) handle_papi_error("error adding event 4\n"); if (PAPI_start(*event_set) != PAPI_OK) handle_papi_error("error starting eventset\n"); if (PAPI_create_eventset(native_event_set) != PAPI_OK) handle_papi_error("error creating eventset\n"); if( PAPI_event_name_to_code("rapl:::PACKAGE_ENERGY:PACKAGE0", &package_energy) != PAPI_OK); handle_papi_error("error translating event 5\n"); if( PAPI_event_name_to_code("rapl:::PP0_ENERGY:PACKAGE0", &pp0_energy) != PAPI_OK); handle_papi_error("error translating event 6\n"); if( PAPI_event_name_to_code("rapl:::DRAM_ENERGY:PACKAGE0", &dram_energy) != PAPI_OK); handle_papi_error("error translating event 7\n"); if (PAPI_add_event(nativeEvents, package_energy) != PAPI_OK) handle_papi_error("error adding event 5\n"); if (PAPI_add_event(nativeEvents, pp0_energy) != PAPI_OK) handle_papi_error("error adding event 6\n"); if (PAPI_add_event(nativeEvents, dram_energy) != PAPI_OK) handle_papi_error("error adding event 7\n"); if (PAPI_start(nativeEvents) != PAPI_OK) handle_papi_error("error starting native eventset\n"); }*/
/* ===================== prolong_mex.c ===================== */
#include <inttypes.h> #include <omp.h> #include "mex.h" #include "prolong_mex.h" void prolongf(float *x, const float *x2, const uint8_t *G, const size_t *sz, const size_t *sz2); void prolongd(double *x, const double *x2, const uint8_t *G, const size_t *sz, const size_t *sz2); #ifdef PROLONG_MEX void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { if ((nrhs != 3) || (nlhs > 1)) { mexErrMsgTxt("Usage: prolong_mex(x, x2, G);"); } const uint8_t *G = (const uint8_t *)mxGetData(prhs[2]); const size_t *sz = (const size_t *)mxGetDimensions(prhs[0]); const size_t *sz2 = (const size_t *)mxGetDimensions(prhs[1]); if (mxIsSingle(prhs[0])) { float *x = (float *)mxGetData(prhs[0]); const float *x2 = (const float *)mxGetData(prhs[1]); prolongf(x, x2, G, sz, sz2); } else { double *x = (double *)mxGetData(prhs[0]); const double *x2 = (const double *)mxGetData(prhs[1]); prolongd(x, x2, G, sz, sz2); } if (nlhs == 1) { plhs[0] = mxCreateDoubleScalar(1.0); } return; } #endif void mx_prolong(mxArray *mxx, const mxArray *mxx2, const mxArray *mxG) { const uint8_t *G = (const uint8_t *)mxGetData(mxG); const size_t *sz = (const size_t *)mxGetDimensions(mxx); const size_t *sz2 = (const size_t *)mxGetDimensions(mxx2); if (mxIsSingle(mxx)) { float *x = (float *)mxGetData(mxx); const float *x2 = (const float *)mxGetData(mxx2); prolongf(x, x2, G, sz, sz2); } else { double *x = (double *)mxGetData(mxx); const double *x2 = (const double *)mxGetData(mxx2); const uint8_t *G = (const uint8_t *)mxGetData(mxG); prolongd(x, x2, G, sz, sz2); } return; } void prolongf(float *x, const float *x2, const uint8_t *G, const size_t *sz, const size_t *sz2) { size_t i2, j2, k2; size_t l2, lk2; size_t l, lk; const size_t nx = sz[0]; const size_t ny = sz[1]; const size_t nz = sz[2]; const size_t nxny = nx*ny; const size_t nx2 = sz2[0]; const size_t ny2 = sz2[1]; const size_t nz2 = sz2[2]; const size_t nxny2 = nx2*ny2; const size_t NX2 = nx2-1; const size_t NY2 = ny2-1; const size_t NZ2 = 
nz2-1; /* offset indices */ const size_t o110 = 1 + nx + 0; const size_t o101 = 1 + 0 + nxny; const size_t o011 = 0 + nx + nxny; const size_t o111 = 1 + nx + nxny; const size_t o110_2 = 1 + nx2 + 0; const size_t o101_2 = 1 + 0 + nxny2; const size_t o011_2 = 0 + nx2 + nxny2; const size_t o111_2 = 1 + nx2 + nxny2; #pragma omp parallel for private(i2,j2,k2,l2,lk2,l,lk) schedule(static) \ if(nxny*nz > 32*32*32) for(k2 = 1; k2 < NZ2; ++k2) { lk2 = nxny2*k2; lk = nxny*((k2<<1)-1); for(j2 = 1; j2 < NY2; ++j2) { l2 = 1 + nx2*j2 + lk2; l = 1 + nx*((j2<<1)-1) + lk; for(i2 = 1; i2 < NX2; ++i2, ++l2, l += 2) { x[l] = G[l] ? x2[l2] : 0.0f; x[l+1] = G[l+1] ? 0.5f*( x2[l2] + x2[l2+1] ) : 0.0f; x[l+nx] = G[l+nx] ? 0.5f*( x2[l2] + x2[l2+nx2] ) : 0.0f; x[l+nxny] = G[l+nxny] ? 0.5f*( x2[l2] + x2[l2+nxny2] ) : 0.0f; x[l+o110] = G[l+o110] ? 0.25f*( x2[l2] + x2[l2+1] + x2[l2+nx2] + x2[l2+o110_2] ) : 0.0f; x[l+o101] = G[l+o101] ? 0.25f*( x2[l2] + x2[l2+1] + x2[l2+nxny2] + x2[l2+o101_2] ) : 0.0f; x[l+o011] = G[l+o011] ? 0.25f*( x2[l2] + x2[l2+nx2] + x2[l2+nxny2] + x2[l2+o011_2] ) : 0.0f; x[l+o111] = G[l+o111] ? 
0.125f*( x2[l2] + x2[l2+1] + x2[l2+nx2] + x2[l2+nxny2] + x2[l2+o110_2] + x2[l2+o101_2] + x2[l2+o011_2] + x2[l2+o111_2] ) : 0.0f; } } } return; } void prolongd(double *x, const double *x2, const uint8_t *G, const size_t *sz, const size_t *sz2) { size_t i2, j2, k2; size_t l2, lk2; size_t l, lk; const size_t nx = sz[0]; const size_t ny = sz[1]; const size_t nz = sz[2]; const size_t nxny = nx*ny; const size_t nx2 = sz2[0]; const size_t ny2 = sz2[1]; const size_t nz2 = sz2[2]; const size_t nxny2 = nx2*ny2; const size_t NX2 = nx2-1; const size_t NY2 = ny2-1; const size_t NZ2 = nz2-1; /* offset indices */ const size_t o110 = 1 + nx + 0; const size_t o101 = 1 + 0 + nxny; const size_t o011 = 0 + nx + nxny; const size_t o111 = 1 + nx + nxny; const size_t o110_2 = 1 + nx2 + 0; const size_t o101_2 = 1 + 0 + nxny2; const size_t o011_2 = 0 + nx2 + nxny2; const size_t o111_2 = 1 + nx2 + nxny2; #pragma omp parallel for private(i2,j2,k2,l2,lk2,l,lk) schedule(static) \ if(nxny*nz > 32*32*32) for(k2 = 1; k2 < NZ2; ++k2) { lk2 = nxny2*k2; lk = nxny*((k2<<1)-1); for(j2 = 1; j2 < NY2; ++j2) { l2 = 1 + nx2*j2 + lk2; l = 1 + nx*((j2<<1)-1) + lk; for(i2 = 1; i2 < NX2; ++i2, ++l2, l += 2) { x[l] = G[l] ? x2[l2] : 0.0; x[l+1] = G[l+1] ? 0.5*( x2[l2] + x2[l2+1] ) : 0.0; x[l+nx] = G[l+nx] ? 0.5*( x2[l2] + x2[l2+nx2] ) : 0.0; x[l+nxny] = G[l+nxny] ? 0.5*( x2[l2] + x2[l2+nxny2] ) : 0.0; x[l+o110] = G[l+o110] ? 0.25*( x2[l2] + x2[l2+1] + x2[l2+nx2] + x2[l2+o110_2] ) : 0.0; x[l+o101] = G[l+o101] ? 0.25*( x2[l2] + x2[l2+1] + x2[l2+nxny2] + x2[l2+o101_2] ) : 0.0; x[l+o011] = G[l+o011] ? 0.25*( x2[l2] + x2[l2+nx2] + x2[l2+nxny2] + x2[l2+o011_2] ) : 0.0; x[l+o111] = G[l+o111] ? 0.125*( x2[l2] + x2[l2+1] + x2[l2+nx2] + x2[l2+nxny2] + x2[l2+o110_2] + x2[l2+o101_2] + x2[l2+o011_2] + x2[l2+o111_2] ) : 0.0; } } } return; }
GB_unop__identity_uint32_int16.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_uint32_int16
// op(A') function:  GB_unop_tran__identity_uint32_int16

// C type:   uint32_t
// A type:   int16_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: output equals input after the cast)
#define GB_OP(z, x) \
    z = x ;

// casting; note that a negative int16_t converts modulo 2^32 per the
// C integer-conversion rules (e.g. -1 becomes 0xFFFFFFFF)
#define GB_CAST(z, aij) \
    uint32_t z = (uint32_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint32_t z = (uint32_t) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_uint32_int16
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    const int16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // element-wise cast of all anz entries, parallelized across nthreads
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int16_t aij = Ax [p] ;
        uint32_t z = (uint32_t) aij ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_uint32_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the loop body lives in the shared template GB_unop_transpose.c,
    // specialized via the GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
Jacobi1D-DiamondByHand-OMP_dyn.test.c
/****************************************************************************** * Jacobi1D benchmark * Tiled using diamond slabs coded by hand * * Usage: * make omp * export OMP_NUM_THREADS=8 * bin/Jacobi1D-DiamondSlabByHand-OMP \ * `cat src/Jacobi1D-DiamondSlabByHand-OMP.perfexecopts` * For a run on 8 threads ******************************************************************************/ #include <stdio.h> #include <omp.h> #include <time.h> #include <stdlib.h> #include <unistd.h> #include <getopt.h> #include <ctype.h> #include <stdbool.h> #include <assert.h> #include "util.h" #define STENCIL(read,write,x) space[write][x] = (space[read][x-1] +\ space[read][x] +\ space[read][x+1])/3; int countTiles( int tiles_start, int upperBound, int stride ){ int x0; int count = 0; for( x0 = tiles_start; x0 <= upperBound; x0 += stride ){ count += 1; } return count; } // main // The steps taken in this code are the following: // 1 - command line parsing // 2 - data allocation and initialization // 3 - jacobi 1D timed within tiling loop // 4 - output and optional verification // int main( int argc, char* argv[] ) { // rather than calling fflush setbuf(stdout, NULL); // 1 - command line parsing Params cmdLineArgs; parseCmdLineArgs(&cmdLineArgs,argc,argv); // 2 - data allocation and initialization // variables required for Jacobi int lowerBound = 1; int upperBound = lowerBound + cmdLineArgs.problemSize - 1; // variables required for tiling int width_min = (cmdLineArgs.width_max + -1 * cmdLineArgs.timeBand) - (0 + 1 * cmdLineArgs.timeBand) +1; // starting point for doing 'A' tiles loops int tiles_A_start = lowerBound - cmdLineArgs.timeBand + 1; // starting point for doing 'B' tiles loop int tiles_B_start = tiles_A_start + cmdLineArgs.width_max; // width between the first x0 point and next x0 point int betweenTiles = width_min + cmdLineArgs.width_max; // assert that this is a valid tile assert( width_min >= 1 && cmdLineArgs.width_max >= width_min ); int count_A_tiles = countTiles( 
tiles_A_start, upperBound, betweenTiles ); int count_B_tiles = countTiles( tiles_B_start, upperBound, betweenTiles ); int A_tiles_per_core = max( 1, count_A_tiles / cmdLineArgs.cores ); int B_tiles_per_core = max( 1, count_B_tiles / cmdLineArgs.cores ); // allocate time-steps 0 and 1 double* space[2] = { NULL, NULL }; space[0] = (double*) malloc( (cmdLineArgs.problemSize + 2) * sizeof(double)); space[1] = (double*) malloc( (cmdLineArgs.problemSize + 2) * sizeof(double)); if( space[0] == NULL || space[1] == NULL ){ printf( "Could not allocate space array\n" ); exit(0); } // perform first touch in the same manner that the tile will use the data int idx; #pragma omp parallel for private(idx) schedule(dynamic) for( idx = tiles_A_start; idx <= upperBound; idx += betweenTiles ){ if(idx >= lowerBound){ int i; for (i=idx;i<(idx+betweenTiles)&&i<upperBound;i++){ space[0][i] = 0; space[1][i] = 0; } } } // use global seed to seed the random number gen (will be constant) srand(cmdLineArgs.globalSeed); // seed the space. 
for( idx = lowerBound; idx <= upperBound; ++idx ){ space[0][idx] = rand() / (double)rand(); } // set halo values (sanity) space[0][0] = 0; space[0][upperBound+1] = 0; space[1][0] = 0; space[1][upperBound+1] = 0; int read, write; int tau = cmdLineArgs.tau_runtime; int T = cmdLineArgs.T; int Ui = upperBound; int Li = 1; int thyme=-12345, k1=-12345, t=-12345, i=-12345; //fprintf(stderr,"tau=%d\n", tau); //fprintf(stderr,"%d, %d\n",floord(2, tau)-2, floord(T*2, tau)); // 4 - run the actual test double start_time = omp_get_wtime(); for ( thyme = floord(2, tau)-2; thyme <= floord(T*2, tau); thyme += 1){ #pragma omp parallel for private(k1, t, write, read, i) schedule(dynamic) for ( k1 = (int)(Ui*2/((double) tau)-thyme+1 )/-2; k1 <= (int)( (Li*2/((double) tau))-thyme-1)/ -2 ; k1 += 1){ // printf("%d, %d, %d, %d\n", thyme, k1, t, i); // begin inner loops over points in tile for ( t = max(1, floord(thyme*tau - k1*tau + k1*tau + 1, 2)); t < min(T+1, tau + floord(thyme*tau - k1*tau + k1*tau, 2)); t += 1){ // printf("%d, %d, %d, %d\n", thyme, k1, t, i); write = t & 1; read = 1-write; //read = (t - 1) & 1; //write = 1 - read; for ( i = max(Li, max(thyme*tau - k1*tau - t, -tau - k1*tau + t + 1)); i <=min(Ui, min(tau + thyme*tau - k1*tau - t - 1, -k1*tau + t)); i += 1){ //fprintf(stderr, "%02d, %02d, %d, %d\n", t,i, thyme, k1); //printf("%d, %d\n", t, i); STENCIL( read, write, i ); // (t, i); } // i } // t } // k1 }// thyme //STENCIL( read, write, idx ); // stencil computation double end_time = omp_get_wtime(); double time = (end_time - start_time); // 4 - output and optional verification /* printf( "p: %d, T: %d, c: %d",cmdLineArgs.problemSize,cmdLineArgs.T, cmdLineArgs.cores); */ if( cmdLineArgs.printtime ){ printf( "Time: %f", time ); } if( cmdLineArgs.verify ){ if(!verifyResultJacobi1D(space[cmdLineArgs.T & 1],cmdLineArgs.problemSize, cmdLineArgs.globalSeed,cmdLineArgs.T )){ fprintf(stderr,"FAILURE\n"); }else{ fprintf(stderr,"SUCCESS\n"); } } return 1; }
sxc_fmt_plug.c
/* SXC cracker patch for JtR. Hacked together during Summer of 2012 by * Dhiru Kholia <dhiru.kholia at gmail.com>. * * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, * are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_sxc; #elif FMT_REGISTERS_H john_register_one(&fmt_sxc); #else #include <string.h> #include <assert.h> #include <errno.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "johnswap.h" #include "sha.h" #include <openssl/blowfish.h> #include "pbkdf2_hmac_sha1.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 2 // tuned on core i7 #endif #endif #include "memdbg.h" #define FORMAT_LABEL "sxc" #define FORMAT_NAME "StarOffice .sxc" #ifdef SIMD_COEF_32 #define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME " Blowfish" #else #define ALGORITHM_NAME "SHA1 Blowfish 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define BINARY_SIZE 20 #define PLAINTEXT_LENGTH 125 #define SALT_SIZE sizeof(struct custom_salt) #define BINARY_ALIGN sizeof(ARCH_WORD_32) #define SALT_ALIGN sizeof(int) #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static struct fmt_tests sxc_tests[] = { 
{"$sxc$*0*0*1024*16*4448359828281a1e6842c31453473abfeae584fb*8*dc0248bea0c7508c*16*1d53770002fe9d8016064e5ef9423174*860*864*f00399ab17b9899cd517758ecf918d4da78099ccd3557aef5e22e137fd5b81f732fc7c167c4de0cf263b4f82b50e3d6abc65da613a36b0025d89e1a09adeb4106da28040d1019bb4b36630fc8bc94fe5b515504bf8a92ea630bb95ace074868e7c10743ec970c89895f44b975a30b6ca032354f3e73ec86b2cc7a4f7a185884026d971b37b1e0e650376a2552e27ba955c700f8903a82a6df11f6cc2ecf63290f02ffdd278f890d1db75f9e8bd0f437c4ec613d3c6dcb421bbd1067be633593ba9bd58f77ef08e0cca64c732f892567d20de8d4c444fa9c1c1adc5e4657ef9740cb69ce55c8f9e6b1cfed0739ef002f1e1c1e54a5df50a759d92354f78eb90a9d9378f36df7d1edd8002ea0d637604fcd2408494c2d42b1771e2a2a20b55044836f76db4ed71e8a53f55a04f9437946603e7246c2d2d70caf6be0de82e8977fab4de84ca3783baedac041195d8b51166b502ff80c17db78f63d3632df1d5ef5b14d8d5553fc40b072030f9e3374c93e929a490c6cfb170f04433fc46f43b9c7d27f3f8c4ed759d4a20c2e53a0701b7c3d9201390a9b5597ce8ba35bd765b662e2242b9821bbb63b6be502d2150fff37e4b7f2a6b592fd0e319a7349df320e7fe7da600a2a05628dc00e04d480c085417f676bd0518bc39d9a9be34fc0cb192d5fa5e0c657cdf7c1ad265a2e81b90ac8b28d326f98b8f33c123df83edc964d2c17a904d0df8bd9ecbf629929d6e48cadc97f49a8941ada3d219e8c0f04f37cecc9a50cc5307fd2a488c34829b05cd1615ae0d1ef0ce450529aa755f9ae38332187ffe4144990de3265afaacb9f0f0fb9c67f6210369f7a0cc5bb346412db08e0f4732f91aa8d4b32fe6eece4fba118f118f6df2fb6c53fa9bc164c9ab7a9d414d33281eb0c3cd02abe0a4dd1c170e41c1c960a8f12a48a7b5e1f748c08e1b150a4e389c110ea3368bc6c6ef2bee98dc92c6825cbf6aee20e690e116c0e6cf48d49b38035f6a9b0cd6053b9f5b9f8360024c9c608cbba3fe5e7966b656fa08dec3e3ce3178a0c0007b7d177c7c44e6a68f4c7325cb98264b1e0f391c75a6a8fd3691581fb68ef459458830f2138d0fd743631efd92b742dfeb62c5ea8502515eb65af414bf805992f9272a7b1b745970fd54e128751f8f6c0a4d5bc7872bc09c04037e1e91dc7192d68f780cdb0f7ef6b282ea883be462ffeffb7b396e30303030", "openwall"}, 
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*259cafe530bd09f8*16*8f53ea878d0795cfe05dcc65fb272c20*1024*1024*ffb0f736b69d8433f958e8f475f609948ad7c9dd052f2b92c14cb1b395ffcac043a3def76d58442e131701b3b53d56ea570633bb20c83068542420160f5db3cee5eece05b67b54d0d4cdf3fbfd928d94852924e391aa3f70cad598b48b946399b0cd1e9e7e7d081a888933f8a1973e83166799396e8290771463c623620b51fb5310b9b0e0de3e5597b66a091301ada3ba6a5d7515d1fcf0eff65e543b50f8fe2222619600054eaf69c7aa680c96bc403f115cab32d6d8e8bc0db3904a71ce3eb1283ca73fd6f75845d9b7d0275e4727a0f56bfbf962a9079f51849de2c9dee7f1dadbbae944f442169281004773309b0f8d68f2cf83076fd8b19afbccf5ab7dc58fb9554fee82e2c491d6434a4cef6f3209775343c840bc4bdfab6705000e67412ac74d00f5b6aba1fd21bca5213234a5a1100a9d93daa141a4936723ea77d008a35c9078167a3529706432b36b9ec012c060d093535c85ca6feb75165d620d7d663c3e76b9bf3af614556ed8560b446a8a73649cb935383a30b4fd8fd75522203e4575cf4bc2b7f01a9294310fe021c71acbf68f6f1e95f48c30c14151c51d4fb878a16272ee73753bc750cbd48007c842412ca1dcb6214215b082c00d619a5318e2ebe9149410f501170093784afc2bd71dd9f5a87b349b96661747b1627e8cba8a5c98559fb146fa7e30db4c6f648ce3c2209f84551a7a1cd46d9172ae1354b6d093f89f6f5f58d29c1d7af8830df62e67753caa8166322caa0f8adf4b61d2013d35baa7c002e1d4c83b1cba8aaa57cf4946627fa63ba7a6a5a5c803e8d5a4794845ab670ef950b918a360cd9f12e8f3424ecab1f505cb494ad35f28d12ff183471d0f47bd67e6abd3b8c8e206d11149474a19b5c13d165d8f6dc39cf579fe1000295328aeeb82e0ae8020d2f61e4c3d6e68c25a655ab72aad5e9e74af4cf27c74158fdb1a29a3d76cd658976fa0a30743247408df00a23b593f68861348a6c46af05d21a4b81fedbf5715462ec8ffc5f001a85c43058ac1fab488236588ef0bf08dd8dd7c7fce630a0a996395b503647d9a2f0dd63dd2f939eca8e1849ee4ed41a6d5672d947177e8f890692de879a20dd9e366ec494d270faf0d24fc076172a25998aac218586404687e7c77b55e77e0eff9b1c65c3f8da99deaa86411ab6aca2531d84b364349591bc73e7504163afd23c5208e321883ee611ea7e4e5885086e4fa7196e16b948cb54808b64b94106c74900e3190fd5f6068b490fd0c9c64481771527a0e2d00899fd5b7a9e7f508cc6770018fadf09d965d7a12ad3624d2161d9546d4a793
7b5f961d7f7c4714786380c147e1ec6b0583503bd5a139b892831d1ea925993bb86f12e75d9010ceba230a1c286fa3d1d654a1672313cbf0763c05c622cee452f76957c42ba0e853ecda163d15e8600a702ccdc9e8f88a", "Ghe+t0Blaster"}, {"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*9bb755c8a4fe8c34*16*112b9d41098c8677615755361da473a6*1024*1024*b95f0f2e0e1c7b4ee61168b646804d4b70b615f3c978cec65c9a7ab515417c79625d104373fd5012c3da6b356f8408a3a75edcc8b2aad0aa38bb33edd8933bdadbffde35a350ade73ccb9df29c2996082f5e94e324496835f8dfebe15ca38950e0f435d711ef964aa09915d58287967b5e321ca195a7f90253157afe82329da9a496c97292419b9a94cdb92f919e6d54700466aff61c200c5a355905b5a37c12d77b0e4ffd23f0204cfa664f4c0545f233db8d35af5fe337b459135da398fd23101becb194db305496474ba4179a7355285a9ec935044e1831f290f5f87ed3e00925e7fb4fc6bc38d9f0cfe9abf72560400490d2fd398d2d49516b618f99168602f323dd1786bcca394830341dfbeb377f9b7ef161dc1470f5e92b6152fa7a4f428e8ae40100791491a9e1c9385298522320488f00535866ac6e08354a75b8b2fd293066da7eb6b4ad7f3e13c8dc98cd815b2393f147fdac6279f76fdac9abd0a94131fa84fe4e99634a362a56d60ce588f6e0b66d6f8b6d411511272ffe32181d20e7d2c3d4b680764607afb2c29dcb94a845b920e96f6c27575534f8b7f9ddd93bdcef0d717d0a899fa937e7d2eeeb6d5b0338757f6e69dac72524d4b6f74edce1f937008eb3653bcc31a88712af940cf47ec3f3efd83e4da89d1a6cb7da6cf8d7d41430bc81a4b5d7bb46cad687f2f505e3379143ae274eed6201c3b17c1e05e516a14cbf2351ccf9fdd46e1309afb170bd01eb8f6a1d8e12441525199455fb550e3fc689b1801332b2d985e336b158f846fcbca18fbe6ea21438cf1fb5fdbce8d6350e65d6468342880845675ec721af2fb9df917a3968b4a1a477fc4c74ee38a71a230d77c2a7cf66ae6b83804488cbd25213ebc470cd845a2691b16161a640ebb385aa2381dc91f692f6c4ca2709b5a7e94dfb4548000a29b56f1da08701945d6209fabbd1621b28849fc27810775f1a0e0204d3ae9040a8cfb1386499a39d87149cfc1579de7d059662ad25a67abd42b30bb3608f09142ca030351c3a1e921e4c7bbc11aab846ef42eb5d1418c15ada77539aca096e0678439cd1b60950d2aa0cc4d2004b1ac48dc6a454c5a8e9ea7e910047c7c83895fd614fd9dfd961631eb23757646143c2aeb03c1a6476e78fc4ccf0f02cc1f88ec1b0080a170ac6871dc183939f7
a4376965b0dfa7922012582eec4846ee621edc5547a2b9c4893e7f67f76541a4bd4a91827a57b3db5cdea29a2a3cc20238d89c8145c14b037360ad27f54f87317ef70472d6b1fd9f1168bcf8aba6071257b3adebab8d4e115188ed4af3fc3574fdccb4bc7eeb00a6a442f1b96a989b735f5e6059ec72c1677b77f437dcb93066f8591a11071799c3a0ec3b48f6160976aff1928c375358837e1ef02e20397b2e9d8d9c4bff23172c9b4c0b941cb1b49b5bc070f72a14cd384", "M1racl33"}, {"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*ceb1edb1e3cb72fd*16*f7104c9b2789540f5fd4beef009c0139*1024*1024*709130b940a9663d0a5687133c6f78535d05a72936faed8c2f3c1b4e29423baaabcee4f0d7d57e3ad8d8c090486f974c4d0ce4be5b29ef8e1b02c01b4af1959ed0b277146a45aec35a48997b584b82697803193644eefd88a7eefcae8819839e13702f887278a597dd954babd82bf71bf8ca8559af0e5537be0264e358d36b4f5067960edf608de731e04d117de953386aadee71849edbc494fac3e6b14567a9e9c545a06d402acd3158441829f25478ed0f9086dabd2d3913b123b43c27176f8f08f30312d84e82d47654097a2bce95554357db3ce3d45a7441472067f55e4ea6244a3dedc23db4bea8f549109ffac382cf5b652c5b1ee431bcab1051567c263a9d668c5d6a15a6f8da754914746c1d3c7eb6347bdd8d6a3ac82e4c742fcf8721913c111dfd5398f2698db00f7220d2a3562e02f7f7a6505af3ba1ee10b46f2ab5b5d2f52d288fd12814c6edbcb8d50b6e8716fba0d5962747b971689fe75e94fa36ec39598ea30e15ab2b9c9f22ca04b890a13b18fb3c7a962050426bb2da08c8b993608b9c1ffd0a21e0c74e993242ead8eb30f86d7d2dcdbd4774d85c2e06adbe4b40050ff0ac1a8afe8fbc2175ec4da4676a691b1fce38421175734c20f07a604fea5287e1c33b420aa9db4de9bd97382c161b4ec0818add675e52ebf036aad779f24b824be4b2b013c470ff66cbf44f5800e128a3b328e80a5fd6295b9b3a94e915f9add6710cb9444432751a7a31c3a3422f48a5eabc26d9a52571b8447bdd0a5977ff7153d95337cef7ff2ec29774332fbeed6ee5eed5e12288cc13e14ba9d5ff3dd052e28ba96715f5b95d7ea214ebcd9e60b26308eb11370b824b5cff2644dd2117985b3c25ba8076d4025cf3a3a62da62d5e11d44422a142048e8cd00c7de6a0a55fd5dc09a3ed01dfe35b88268f351b6ff289fee8e52ac29fe32d9990e0d6d87f39727b6a762bac9d509c6ea235fc8bedc3bec2143eae9fd2cb831b798ef8261d72785002638b940947de0aad64f791f9a27e5b091e55adf4aee0649f6785bdd37e02
48fedd1759d771aeacacb3ff6e7cf2d045f791428ab61710b54e869213393caf1b6bc99066678351deafc290cecc1f6b40b5532adbbab9a70408c61a437d4483b6a75cb61a55b20881efc0d849e0f60c1887f0fa091672179a145c4ab1b6487a0e939e0123d5aaffa3aec66ab593f9c25d27f22f4a73a999a4ab45e8bc7d71a85e2d40afadad1a1dc0b8389f96f91614293fa205583ef1c3440e3df50e8aa5f1a13e5929b72cd003461ff03d44d8c84bdada176b24459021d398b2b91b61a9c0b553a8714c703d32452c691a33f1581e98c2439514ca3e7deeef90850f8d6d89bf1d3a5762a56ef769ea588f5c1705bfb7b944cfbbb0632718ee3722f4e1929b35706d6413a315a11bc16349af109a7e675df2ab1eebe93", "excel123"}, {NULL} }; #if defined (_OPENMP) static int omp_t = 1; #endif static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[32 / sizeof(ARCH_WORD_32)]; static struct custom_salt { int cipher_type; // FIXME: cipher_type seems to be ignored int checksum_type; int iterations; int key_size; int iv_length; int salt_length; int original_length; int length; unsigned char iv[16]; unsigned char salt[32]; unsigned char content[1024]; } *cur_salt; static void init(struct fmt_main *self) { #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy; char *keeptr; char *p; int res; if (strncmp(ciphertext, "$sxc$*", 6)) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += 6; if ((p = strtokm(ctcopy, "*")) == NULL) /* cipher type */ goto err; res = atoi(p); if (res != 0 && res != 1) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* checksum type */ goto err; res = atoi(p); if (res != 0 && res != 1) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* iterations */ goto err; res = atoi(p); if 
(res <= 0) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* key size */ goto err; res = atoi(p); if (res != 16 && res != 32) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* checksum field (skipped) */ goto err; if (hexlenl(p) != BINARY_SIZE * 2) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* iv length */ goto err; res = atoi(p); if (res <= 0 || res > 16) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* iv */ goto err; if (hexlenl(p) != res * 2) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* salt length */ goto err; res = atoi(p); if (res <= 0 || res > 32) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* salt */ goto err; if (hexlenl(p) != res * 2) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* original length */ goto err; res = atoi(p); if (res <= 0 || res > 1024) /* 1024 because of "unsigned char output[1024];" in crypt_all */ goto err; if ((p = strtokm(NULL, "*")) == NULL) /* length */ goto err; res = atoi(p); if (res <= 0 || res > 1024) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* content */ goto err; if (hexlenl(p) != res * 2) goto err; if (strtokm(NULL, "*") != NULL) /* the end */ goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; int i; char *p; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); ctcopy += 6; /* skip over "$sxc$*" */ p = strtokm(ctcopy, "*"); cs.cipher_type = atoi(p); p = strtokm(NULL, "*"); cs.checksum_type = atoi(p); p = strtokm(NULL, "*"); cs.iterations = atoi(p); p = strtokm(NULL, "*"); cs.key_size = atoi(p); strtokm(NULL, "*"); /* skip checksum field */ p = strtokm(NULL, "*"); cs.iv_length = atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < cs.iv_length; i++) cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "*"); cs.salt_length = atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < cs.salt_length; i++) cs.salt[i] = atoi16[ARCH_INDEX(p[i * 
2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "*"); cs.original_length = atoi(p); p = strtokm(NULL, "*"); cs.length = atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < cs.length; i++) cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); return (void *)&cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE+1]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; ctcopy += 6; /* skip over "$sxc$*" */ strtokm(ctcopy, "*"); strtokm(NULL, "*"); strtokm(NULL, "*"); strtokm(NULL, "*"); p = strtokm(NULL, "*"); for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } MEM_FREE(keeptr); return out; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) #endif { unsigned char key[MAX_KEYS_PER_CRYPT][32]; unsigned char hash[MAX_KEYS_PER_CRYPT][32]; BF_KEY bf_key; int bf_ivec_pos; unsigned char ivec[8]; unsigned char output[1024]; int i; SHA_CTX ctx; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { SHA1_Init(&ctx); SHA1_Update(&ctx, (unsigned char *)saved_key[index+i], strlen(saved_key[index+i])); 
SHA1_Final((unsigned char *)hash[i], &ctx); } #ifdef SIMD_COEF_32 { int lens[MAX_KEYS_PER_CRYPT]; unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT]; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { lens[i] = 20; pin[i] = (unsigned char*)hash[i]; pout[i] = key[i]; } pbkdf2_sha1_sse((const unsigned char**)pin, lens, cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, pout, cur_salt->key_size, 0); } #else pbkdf2_sha1(hash[0], 20, cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, key[0], cur_salt->key_size, 0); #endif for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { bf_ivec_pos = 0; memcpy(ivec, cur_salt->iv, 8); BF_set_key(&bf_key, cur_salt->key_size, key[i]); BF_cfb64_encrypt(cur_salt->content, output, cur_salt->length, &bf_key, ivec, &bf_ivec_pos, 0); SHA1_Init(&ctx); SHA1_Update(&ctx, output, cur_salt->original_length); SHA1_Final((unsigned char*)crypt_out[index+i], &ctx); } } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void sxc_set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } static unsigned int iteration_count(void *salt) { struct custom_salt *my_salt; my_salt = salt; return (unsigned int) my_salt->iterations; } struct fmt_main fmt_sxc = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", }, sxc_tests }, { init, done, fmt_default_reset, 
fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { iteration_count, }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, sxc_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
nodal_two_step_v_p_strategy_for_FSI.h
// // Project Name: KratosPFEMFluidDynamicsApplication $ // Last modified by: $Author: AFranci $ // Date: $Date: June 2018 $ // Revision: $Revision: 0.0 $ // // #ifndef KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_FOR_FSI_H #define KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_FOR_FSI_H #include "includes/define.h" #include "includes/model_part.h" #include "includes/deprecated_variables.h" #include "includes/cfd_variables.h" #include "utilities/openmp_utils.h" #include "processes/process.h" #include "solving_strategies/schemes/scheme.h" #include "solving_strategies/strategies/solving_strategy.h" #include "custom_utilities/mesher_utilities.hpp" #include "custom_utilities/boundary_normals_calculation_utilities.hpp" #include "geometries/geometry.h" #include "utilities/geometry_utilities.h" #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h" #include "custom_strategies/builders_and_solvers/nodal_residualbased_elimination_builder_and_solver_for_FSI.h" #include "custom_strategies/builders_and_solvers/nodal_residualbased_elimination_builder_and_solver_continuity_for_FSI.h" #include "custom_strategies/builders_and_solvers/nodal_residualbased_block_builder_and_solver.h" #include "custom_utilities/solver_settings.h" #include "custom_strategies/strategies/gauss_seidel_linear_strategy.h" #include "pfem_fluid_dynamics_application_variables.h" #include "nodal_two_step_v_p_strategy.h" #include "nodal_two_step_v_p_strategy_for_FSI.h" #include <stdio.h> #include <math.h> #include <iostream> #include <fstream> namespace Kratos { ///@addtogroup PFEMFluidDynamicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ template<class TSparseSpace, class TDenseSpace, class TLinearSolver > class NodalTwoStepVPStrategyForFSI : public NodalTwoStepVPStrategy<TSparseSpace,TDenseSpace,TLinearSolver> { public: ///@name Type Definitions ///@{ 
KRATOS_CLASS_POINTER_DEFINITION(NodalTwoStepVPStrategyForFSI); /// Counted pointer of NodalTwoStepVPStrategy //typedef boost::shared_ptr< NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> > Pointer; typedef NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TDataType TDataType; /// Node type (default is: Node<3>) typedef Node <3> NodeType; /// Geometry type (using with given NodeType) typedef Geometry<NodeType> GeometryType; typedef std::size_t SizeType; //typedef typename BaseType::DofSetType DofSetType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::ElementsArrayType ElementsArrayType; typedef typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType; typedef TwoStepVPSolverSettings<TSparseSpace,TDenseSpace,TLinearSolver> SolverSettingsType; using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mVelocityTolerance; using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mPressureTolerance; using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mMaxPressureIter; using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mDomainSize; using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mTimeOrder; using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mReformDofSet; using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mpMomentumStrategy; using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mpPressureStrategy; typedef GeometryType::ShapeFunctionsGradientsType ShapeFunctionDerivativesArrayType; typedef 
GlobalPointersVector<Node<3> > NodeWeakPtrVectorType;

    ///@}
    ///@name Life Cycle
    ///@{

    /// Constructor from a solver-settings object: delegates all strategy
    /// construction to the base class' InitializeStrategy.
    NodalTwoStepVPStrategyForFSI(ModelPart& rModelPart,
                                 SolverSettingsType& rSolverConfig):
        BaseType(rModelPart)
    {
        NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::InitializeStrategy(rSolverConfig);
    }

    /// Constructor from explicit linear solvers: builds the momentum and
    /// continuity (pressure) sub-strategies used by the fractional-step scheme.
    /// @param pVelocityLinearSolver linear solver for the momentum system
    /// @param pPressureLinearSolver linear solver for the continuity system
    /// @param ReformDofSet rebuild the DofSet each step (handled by this strategy)
    /// @param VelTol / PresTol nonlinear convergence tolerances
    /// @param MaxPressureIterations iteration cap (predictor-corrector only)
    NodalTwoStepVPStrategyForFSI(ModelPart& rModelPart,
                                 /*SolverConfiguration<TSparseSpace, TDenseSpace, TLinearSolver>& rSolverConfig,*/
                                 typename TLinearSolver::Pointer pVelocityLinearSolver,
                                 typename TLinearSolver::Pointer pPressureLinearSolver,
                                 bool ReformDofSet = true,
                                 double VelTol = 0.0001,
                                 double PresTol = 0.0001,
                                 int MaxPressureIterations = 1,// Only for predictor-corrector
                                 unsigned int TimeOrder = 2,
                                 unsigned int DomainSize = 2):
        BaseType(rModelPart,
                 pVelocityLinearSolver,
                 pPressureLinearSolver,
                 ReformDofSet,
                 VelTol,
                 PresTol,
                 MaxPressureIterations,
                 TimeOrder,
                 DomainSize)
    {
        KRATOS_TRY;

        BaseType::SetEchoLevel(1);

        // Check that input parameters are reasonable and sufficient.
        this->Check();

        bool CalculateNormDxFlag = true;

        bool ReformDofAtEachIteration = false; // DofSet modifiaction is managed by the fractional step strategy, auxiliary strategies should not modify the DofSet directly.

        // Additional Typedefs
        typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer;
        // NOTE(review): this local typedef intentionally SHADOWS the class-level
        // BaseType (NodalTwoStepVPStrategy) with SolvingStrategy for the rest of
        // this constructor body -- confirm before refactoring.
        typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;

        //initializing fractional velocity solution step
        typedef Scheme< TSparseSpace, TDenseSpace > SchemeType;
        typename SchemeType::Pointer pScheme;

        // Static scheme: time integration is handled nodally, not by the scheme.
        typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme< TSparseSpace, TDenseSpace > ());
        pScheme.swap(Temp);

        //CONSTRUCTION OF VELOCITY
        BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new NodalResidualBasedEliminationBuilderAndSolverForFSI<TSparseSpace, TDenseSpace, TLinearSolver > (pVelocityLinearSolver));

        this->mpMomentumStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver > (rModelPart, pScheme, pVelocityLinearSolver, vel_build, ReformDofAtEachIteration, CalculateNormDxFlag));

        this->mpMomentumStrategy->SetEchoLevel( BaseType::GetEchoLevel() );

        vel_build->SetCalculateReactionsFlag(false);

        // Continuity (pressure) sub-strategy, mirroring the momentum setup.
        BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI<TSparseSpace, TDenseSpace, TLinearSolver > (pPressureLinearSolver));

        this->mpPressureStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver > (rModelPart, pScheme, pPressureLinearSolver, pressure_build, ReformDofAtEachIteration, CalculateNormDxFlag));

        this->mpPressureStrategy->SetEchoLevel( BaseType::GetEchoLevel() );

        pressure_build->SetCalculateReactionsFlag(false);

        KRATOS_CATCH("");
    }

    /// Destructor.
virtual ~NodalTwoStepVPStrategyForFSI(){}

    /// Driver for one time step of the nodally-integrated fractional-step
    /// V-P (velocity-pressure) FSI scheme: alternates momentum and continuity
    /// iterations until both converge or the iteration cap is reached.
    /// @return NormDp (note: never updated in this body; returns 0.0)
    double Solve() override
    {
        // Initialize BDF2 coefficients
        ModelPart& rModelPart = BaseType::GetModelPart();
        this->SetTimeCoefficients(rModelPart.GetProcessInfo());

        double NormDp = 0.0;

        ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
        double currentTime = rCurrentProcessInfo[TIME];
        double timeInterval = rCurrentProcessInfo[DELTA_TIME];
        bool timeIntervalChanged= rCurrentProcessInfo[TIME_INTERVAL_CHANGED];

        bool momentumAlreadyConverged=false;
        bool continuityAlreadyConverged=false;

        unsigned int maxNonLinearIterations=mMaxPressureIter;

        std::cout << "\n                   Solve with nodally_integrated_two_step_vp strategy at t="<< currentTime<<"s"<<std::endl;

        // Grant extra iterations right after a time-step-size change and during
        // the first/second batches of 10 steps, when convergence is hardest.
        if(timeIntervalChanged==true && currentTime>10*timeInterval ){
            maxNonLinearIterations*=2;
        }
        if(currentTime<10*timeInterval){
            if ( BaseType::GetEchoLevel() > 1)
                std::cout << "within the first 10 time steps, I consider the given iteration number x3"<< std::endl;
            maxNonLinearIterations*=3;
        }
        if(currentTime<20*timeInterval && currentTime>=10*timeInterval){
            if ( BaseType::GetEchoLevel() > 1)
                std::cout << "within the second 10 time steps, I consider the given iteration number x2"<< std::endl;
            maxNonLinearIterations*=2;
        }

        bool momentumConverged = true;
        bool continuityConverged = false;
        bool fixedTimeStep=false;
        /* boost::timer solve_step_time; */

        std::cout<<"                  InitializeSolutionStep().... "<<std::endl;
        InitializeSolutionStep(); // it fills SOLID_NODAL_SFD_NEIGHBOURS_ORDER for solids and NODAL_SFD_NEIGHBOURS_ORDER for fluids and inner solids

        for(unsigned int it = 0; it < maxNonLinearIterations; ++it)
        {
            if ( BaseType::GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)
                std::cout << "----- > iteration: " << it << std::endl;

            std::cout << "----- > iteration: " << it << std::endl;

            if(it==0){
                // First iteration only: nodal volumes and element FLUID/SOLID
                // flags, then the shape-function-derivative neighbour tables.
                ComputeNodalVolumeAndAssignFlagToElementType(); // it assigns NODAL_VOLUME to fluid and SOLID_NODAL_VOLUME to solid. Interface nodes have both
                this->InitializeNonLinearIterations(); // it fills SOLID_NODAL_SFD_NEIGHBOURS for solids and NODAL_SFD_NEIGHBOURS for fluids
            }
            std::cout<<"              CalcNodalStrainsAndStresses .... "<<std::endl;
            CalcNodalStrainsAndStresses(); // it computes stresses and strains for fluid and solid nodes
            std::cout<<"              CalcNodalStrainsAndStresses DONE  "<<std::endl;

            momentumConverged = this->SolveMomentumIteration(it,maxNonLinearIterations,fixedTimeStep);

            // After the momentum solve the mesh may have moved: refresh
            // topology, volumes, neighbour tables and strains before the
            // continuity solve.
            UpdateTopology(rModelPart, BaseType::GetEchoLevel());
            std::cout<<"              ComputeNodalVolume .... "<<std::endl;
            ComputeNodalVolume();
            std::cout<<"              ComputeNodalVolume DONE "<<std::endl;
            this->InitializeNonLinearIterations();
            std::cout<<"              InitializeNonLinearIterations DONE "<<std::endl;
            CalcNodalStrains();
            std::cout<<"              CalcNodalStrains DONE "<<std::endl;

            if( fixedTimeStep==false){
                continuityConverged = this->SolveContinuityIteration(it,maxNonLinearIterations);
            }

            // if((momentumConverged==true || it==maxNonLinearIterations-1) && momentumAlreadyConverged==false){
            //   std::ofstream myfile;
            //   myfile.open ("momentumConvergedIteration.txt",std::ios::app);
            //   myfile << currentTime << "\t" << it << "\n";
            //   myfile.close();
            //   momentumAlreadyConverged=true;
            // }
            // if((continuityConverged==true || it==maxNonLinearIterations-1) && continuityAlreadyConverged==false){
            //   std::ofstream myfile;
            //   myfile.open ("continuityConvergedIteration.txt",std::ios::app);
            //   myfile << currentTime << "\t" << it << "\n";
            //   myfile.close();
            //   continuityAlreadyConverged=true;
            // }

            // Last iteration (or both solvers converged past iteration 1):
            // finalize the step by computing nodal accelerations.
            if(it==maxNonLinearIterations-1 || ((continuityConverged && momentumConverged) && it>1)){
                //this->ComputeErrorL2NormCaseImposedG();
                //this->ComputeErrorL2NormCasePoiseuille();
                this->CalculateAccelerations();
                // std::ofstream myfile;
                // myfile.open ("maxConvergedIteration.txt",std::ios::app);
                // myfile << currentTime << "\t" << it << "\n";
                // myfile.close();
            }
            if ( (continuityConverged && momentumConverged) && it>1)
            {
                rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE,false);
rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE,false); std::cout << "nodal V-P strategy converged in " << it+1 << " iterations." << std::endl; break; } } if (!continuityConverged && !momentumConverged && BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "Convergence tolerance not reached." << std::endl; if (mReformDofSet) this->Clear(); /* std::cout << "solve_step_time : " << solve_step_time.elapsed() << std::endl; */ return NormDp; } void Initialize() override { std::cout<<" Initialize in nodal_two_step_v_p_strategy"<<std::endl; ModelPart& rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); unsigned int sizeStrains=3*(dimension-1); // #pragma omp parallel // { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { NodeWeakPtrVectorType& neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); unsigned int neighbourNodes=neighb_nodes.size(); unsigned int sizeSDFNeigh=neighbourNodes*dimension; if(itNode->SolutionStepsDataHas(NODAL_CAUCHY_STRESS)){ Vector& rNodalStress = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS); if(rNodalStress.size() != sizeStrains){ rNodalStress.resize(sizeStrains,false); } noalias(rNodalStress) = ZeroVector(sizeStrains); }else{ std::cout<<"THIS node does not have NODAL_CAUCHY_STRESS... "<<itNode->X()<<" "<<itNode->Y()<<std::endl; } if(itNode->SolutionStepsDataHas(NODAL_DEVIATORIC_CAUCHY_STRESS)){ Vector& rNodalStress = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS); if(rNodalStress.size() != sizeStrains){ rNodalStress.resize(sizeStrains,false); } noalias(rNodalStress) = ZeroVector(sizeStrains); }else{ std::cout<<"THIS node does not have NODAL_DEVIATORIC_CAUCHY_STRESS... 
"<<itNode->X()<<" "<<itNode->Y()<<std::endl; } if(itNode->SolutionStepsDataHas(NODAL_VOLUME)){ itNode->FastGetSolutionStepValue(NODAL_VOLUME)=0; }else{ std::cout<<"THIS node does not have NODAL_VOLUME... "<<itNode->X()<<" "<<itNode->Y()<<std::endl; } if(itNode->SolutionStepsDataHas(NODAL_MEAN_MESH_SIZE)){ itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE)=0; }else{ std::cout<<"THIS node does not have NODAL_MEAN_MESH_SIZE... "<<itNode->X()<<" "<<itNode->Y()<<std::endl; } if(itNode->SolutionStepsDataHas(NODAL_FREESURFACE_AREA)){ itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA)=0; }else{ std::cout<<"THIS node does not have NODAL_FREESURFACE_AREA... "<<itNode->X()<<" "<<itNode->Y()<<std::endl; } if(itNode->SolutionStepsDataHas(NODAL_SFD_NEIGHBOURS)){ Vector& rNodalSFDneighbours=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS); if(rNodalSFDneighbours.size() != sizeSDFNeigh){ rNodalSFDneighbours.resize(sizeSDFNeigh,false); } noalias(rNodalSFDneighbours)=ZeroVector(sizeSDFNeigh); }else{ std::cout<<"THIS node does not have NODAL_SFD_NEIGHBOURS... "<<itNode->X()<<" "<<itNode->Y()<<std::endl; } if(itNode->SolutionStepsDataHas(NODAL_SPATIAL_DEF_RATE)){ Vector& rSpatialDefRate=itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE); if(rSpatialDefRate.size() != sizeStrains){ rSpatialDefRate.resize(sizeStrains,false); } noalias(rSpatialDefRate)=ZeroVector(sizeStrains); }else{ std::cout<<"THIS node does not have NODAL_SPATIAL_DEF_RATE... "<<itNode->X()<<" "<<itNode->Y()<<std::endl; } if(itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD)){ Matrix& rFgrad=itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD); if(rFgrad.size1() != dimension){ rFgrad.resize(dimension,dimension,false); } noalias(rFgrad)=ZeroMatrix(dimension,dimension); }else{ std::cout<<"THIS node does not have NODAL_DEFORMATION_GRAD... 
"<<itNode->X()<<" "<<itNode->Y()<<std::endl; } if(itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD_VEL)){ Matrix& rFgradVel=itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL); if(rFgradVel.size1() != dimension){ rFgradVel.resize(dimension,dimension,false); } noalias(rFgradVel)=ZeroMatrix(dimension,dimension); }else{ std::cout<<"THIS node does not have NODAL_DEFORMATION_GRAD_VEL... "<<itNode->X()<<" "<<itNode->Y()<<std::endl; } if(itNode->SolutionStepsDataHas(SOLID_NODAL_CAUCHY_STRESS)){ Vector& rSolidNodalStress = itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS); if(rSolidNodalStress.size() != sizeStrains){ rSolidNodalStress.resize(sizeStrains,false); } noalias(rSolidNodalStress) = ZeroVector(sizeStrains); }else{ std::cout<<"THIS node does not have SOLID_NODAL_CAUCHY_STRESS... "<<itNode->X()<<" "<<itNode->Y()<<std::endl; } if(itNode->SolutionStepsDataHas(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS)){ Vector& rSolidNodalStress = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS); if(rSolidNodalStress.size() != sizeStrains){ rSolidNodalStress.resize(sizeStrains,false); } noalias(rSolidNodalStress) = ZeroVector(sizeStrains); }else{ std::cout<<"THIS node does not have SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS... "<<itNode->X()<<" "<<itNode->Y()<<std::endl; } if(itNode->SolutionStepsDataHas(SOLID_NODAL_VOLUME)){ itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME)=0; }else{ std::cout<<"THIS node does not have SOLID_NODAL_VOLUME... "<<itNode->X()<<" "<<itNode->Y()<<std::endl; } if(itNode->SolutionStepsDataHas(SOLID_NODAL_MEAN_MESH_SIZE)){ itNode->FastGetSolutionStepValue(SOLID_NODAL_MEAN_MESH_SIZE)=0; }else{ std::cout<<"THIS node does not have SOLID_NODAL_MEAN_MESH_SIZE... "<<itNode->X()<<" "<<itNode->Y()<<std::endl; } if(itNode->SolutionStepsDataHas(SOLID_NODAL_FREESURFACE_AREA)){ itNode->FastGetSolutionStepValue(SOLID_NODAL_FREESURFACE_AREA)=0; }else{ std::cout<<"THIS node does not have SOLID_NODAL_FREESURFACE_AREA... 
"<<itNode->X()<<" "<<itNode->Y()<<std::endl; } if(itNode->SolutionStepsDataHas(SOLID_NODAL_SFD_NEIGHBOURS)){ Vector& rSolidNodalSFDneighbours=itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS); if(rSolidNodalSFDneighbours.size() != sizeSDFNeigh){ rSolidNodalSFDneighbours.resize(sizeSDFNeigh,false); } noalias(rSolidNodalSFDneighbours)=ZeroVector(sizeSDFNeigh); }else{ std::cout<<"THIS node does not have SOLID_NODAL_SFD_NEIGHBOURS... "<<itNode->X()<<" "<<itNode->Y()<<std::endl; } if(itNode->SolutionStepsDataHas(SOLID_NODAL_SPATIAL_DEF_RATE)){ Vector& rSolidSpatialDefRate=itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE); if(rSolidSpatialDefRate.size() != sizeStrains){ rSolidSpatialDefRate.resize(sizeStrains,false); } noalias(rSolidSpatialDefRate)=ZeroVector(sizeStrains); }else{ std::cout<<"THIS node does not have SOLID_NODAL_SPATIAL_DEF_RATE... "<<itNode->X()<<" "<<itNode->Y()<<std::endl; } if(itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD)){ Matrix& rSolidFgrad=itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD); if(rSolidFgrad.size1() != dimension){ rSolidFgrad.resize(dimension,dimension,false); } noalias(rSolidFgrad)=ZeroMatrix(dimension,dimension); }else{ std::cout<<"THIS node does not have SOLID_NODAL_DEFORMATION_GRAD... "<<itNode->X()<<" "<<itNode->Y()<<std::endl; } if(itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD_VEL)){ Matrix& rSolidFgradVel=itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL); if(rSolidFgradVel.size1() != dimension){ rSolidFgradVel.resize(dimension,dimension,false); } noalias(rSolidFgradVel)=ZeroMatrix(dimension,dimension); }else{ std::cout<<"THIS node does not have SOLID_NODAL_DEFORMATION_GRAD_VEL... 
"<<itNode->X()<<" "<<itNode->Y()<<std::endl; } AssignMaterialToEachNode(itNode); } // } } void AssignMaterialToEachNode(ModelPart::NodeIterator itNode) { ModelPart& rModelPart = BaseType::GetModelPart(); ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo(); const double timeInterval = rCurrentProcessInfo[DELTA_TIME]; double deviatoricCoeff=0; double volumetricCoeff=0; if(itNode->Is(SOLID)) { double youngModulus=itNode->FastGetSolutionStepValue(YOUNG_MODULUS); double poissonRatio=itNode->FastGetSolutionStepValue(POISSON_RATIO); double solidDensity=itNode->FastGetSolutionStepValue(DENSITY); itNode->FastGetSolutionStepValue(SOLID_DENSITY)=solidDensity; deviatoricCoeff = timeInterval*youngModulus/(1.0+poissonRatio)*0.5; volumetricCoeff = timeInterval*poissonRatio*youngModulus/((1.0+poissonRatio)*(1.0-2.0*poissonRatio)) + 2.0*deviatoricCoeff/3.0; } else if(itNode->Is(FLUID) || itNode->Is(RIGID) ) { deviatoricCoeff = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY); volumetricCoeff = timeInterval*itNode->FastGetSolutionStepValue(BULK_MODULUS); } if((itNode->Is(SOLID) && itNode->Is(RIGID))) { itNode->FastGetSolutionStepValue(INTERFACE_NODE)=true; }else{ itNode->FastGetSolutionStepValue(INTERFACE_NODE)=false; } double currFirstLame=volumetricCoeff - 2.0*deviatoricCoeff/3.0; itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT)=currFirstLame; itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT)=deviatoricCoeff; } void ComputeNodalVolume() { ModelPart& rModelPart = BaseType::GetModelPart(); ElementsArrayType& pElements = rModelPart.Elements(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif vector<unsigned int> element_partition; OpenMPUtils::CreatePartition(number_of_threads, pElements.size(), element_partition); // #pragma omp parallel // { int k = OpenMPUtils::ThisThread(); typename 
ElementsArrayType::iterator ElemBegin = pElements.begin() + element_partition[k]; typename ElementsArrayType::iterator ElemEnd = pElements.begin() + element_partition[k + 1]; for (typename ElementsArrayType::iterator itElem = ElemBegin; itElem != ElemEnd; itElem++) //MSI: To be parallelized { Element::GeometryType& geometry = itElem->GetGeometry(); double elementalVolume=0; if(dimension==2){ elementalVolume=geometry.Area()/3.0; }else if(dimension==3){ elementalVolume=geometry.Volume()*0.25; } // index = 0; unsigned int numNodes=geometry.size(); for (unsigned int i = 0; i <numNodes; i++) { double& nodalVolume = geometry(i)->FastGetSolutionStepValue(NODAL_VOLUME); nodalVolume += elementalVolume; if(itElem->Is(SOLID)) { double& solidVolume = geometry(i)->FastGetSolutionStepValue(SOLID_NODAL_VOLUME); solidVolume += elementalVolume; nodalVolume += -elementalVolume; // if(geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE)==true){ // //I have the subtract the solid volume to the nodal volume of the interface fluid nodes because I added it before // nodalVolume += -elementalVolume; // } } } } // } } void ComputeNodalVolumeAndAssignFlagToElementType() { ModelPart& rModelPart = BaseType::GetModelPart(); ElementsArrayType& pElements = rModelPart.Elements(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif vector<unsigned int> element_partition; OpenMPUtils::CreatePartition(number_of_threads, pElements.size(), element_partition); // #pragma omp parallel // { int k = OpenMPUtils::ThisThread(); typename ElementsArrayType::iterator ElemBegin = pElements.begin() + element_partition[k]; typename ElementsArrayType::iterator ElemEnd = pElements.begin() + element_partition[k + 1]; double solidDensity=0; for (typename ElementsArrayType::iterator itElem = ElemBegin; itElem != ElemEnd; itElem++) //MSI: To be parallelized { 
Element::GeometryType& geometry = itElem->GetGeometry(); double elementalVolume=0; if(dimension==2){ elementalVolume=geometry.Area()/3.0; }else if(dimension==3){ elementalVolume=geometry.Volume()*0.25; } // index = 0; unsigned int numNodes=geometry.size(); unsigned int fluidNodes=0; unsigned int solidNodes=0; unsigned int interfaceNodes=0; for (unsigned int i = 0; i <numNodes; i++) { if((geometry(i)->Is(FLUID) && geometry(i)->IsNot(SOLID)) || (geometry(i)->Is(FLUID) && geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE)==true)){ fluidNodes+=1; } if(geometry(i)->Is(SOLID)){ solidNodes+=1; } if(geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE)==true){ interfaceNodes+=1; } if(geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE)==false && geometry(i)->Is(SOLID)){ solidDensity=geometry(i)->FastGetSolutionStepValue(SOLID_DENSITY); } } if(solidNodes==numNodes){ itElem->Set(SOLID); // std::cout<<"THIS SOLID ELEMENT WAS "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl; } if(interfaceNodes==numNodes){ itElem->Set(SOLID); // std::cout<<"THIS INTERFACE ELEMENT WAS "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl; } if(fluidNodes==numNodes){ itElem->Set(FLUID); // std::cout<<"THIS FLUID ELEMENT WAS "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl; } if(solidNodes==numNodes && fluidNodes==numNodes){ itElem->Reset(FLUID); std::cout<<"THIS ELEMENT WAS BOTH FLUID AND SOLID "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl; } for (unsigned int i = 0; i <numNodes; i++) { double& nodalVolume = geometry(i)->FastGetSolutionStepValue(NODAL_VOLUME); nodalVolume += elementalVolume; if(itElem->Is(SOLID)){ geometry(i)->FastGetSolutionStepValue(SOLID_DENSITY)=solidDensity; double& solidVolume = geometry(i)->FastGetSolutionStepValue(SOLID_NODAL_VOLUME); solidVolume+=elementalVolume; nodalVolume += -elementalVolume; // 
if(geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE)==true){ // //I have the subtract the solid volume to the nodal volume of the interface fluid nodes because I added it before // nodalVolume += -elementalVolume; // } if(interfaceNodes==numNodes && solidDensity==0){ std::cout<<"This interface element has not a correct density....I am assigning it the fluid density----- TODO: IMPROVE IT, TAKE FROM NEIGHBOURS"<<std::endl; double density=geometry(i)->FastGetSolutionStepValue(DENSITY); geometry(i)->FastGetSolutionStepValue(SOLID_DENSITY)=density; } } } } // } } void InitializeSolutionStep() override { FillNodalSFDVector(); } void FillNodalSFDVector() { std::cout << "FillNodalSFDVector(); ... " << std::endl; ModelPart& rModelPart = BaseType::GetModelPart(); // #pragma omp parallel // { // ModelPart::NodeIterator NodesBegin; // ModelPart::NodeIterator NodesEnd; // OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); // for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) // { for(ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); itNode++) { this->InitializeNodalVariablesForRemeshedDomain(itNode); InitializeNodalVariablesForSolidRemeshedDomain(itNode); if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==false){ this->SetNeighboursOrderToNode(itNode); // it assigns neighbours to inner nodes, filling NODAL_SFD_NEIGHBOURS_ORDER if(itNode->Is(SOLID)){ SetNeighboursOrderToSolidNode(itNode); // it assigns neighbours to solid inner nodes, filling SOLID_NODAL_SFD_NEIGHBOURS_ORDER } }else{ SetNeighboursOrderToInterfaceNode(itNode); // it assigns neighbours to interface nodes, filling SOLID_NODAL_SFD_NEIGHBOURS_ORDER for solids and NODAL_SFD_NEIGHBOURS_ORDER for fluids } } // } std::cout << "FillNodalSFDVector(); DONE " << std::endl; } void SetNeighboursOrderToSolidNode(ModelPart::NodeIterator itNode) { NodeWeakPtrVectorType& neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); unsigned int 
neighbourNodes=neighb_nodes.size()+1; // +1 becausealso the node itself must be considered as nieghbor node Vector& rNodeOrderedNeighbours=itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER); if(rNodeOrderedNeighbours.size() != neighbourNodes) rNodeOrderedNeighbours.resize(neighbourNodes,false); noalias(rNodeOrderedNeighbours)=ZeroVector(neighbourNodes); rNodeOrderedNeighbours[0]=itNode->Id(); if(neighbourNodes>1) { for(unsigned int k = 0; k< neighbourNodes-1; k++) { rNodeOrderedNeighbours[k+1]=neighb_nodes[k].Id(); } } } void SetNeighboursOrderToInterfaceNode(ModelPart::NodeIterator itNode) { NodeWeakPtrVectorType& neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); unsigned int neighbourNodes=neighb_nodes.size()+1; unsigned int fluidCounter=1; unsigned int solidCounter=1; if(neighbourNodes>1) { for(unsigned int k = 0; k< neighbourNodes-1; k++) { if(neighb_nodes[k].IsNot(SOLID) || neighb_nodes[k].FastGetSolutionStepValue(INTERFACE_NODE)==true){ fluidCounter+=1; } if(neighb_nodes[k].Is(SOLID)){ solidCounter+=1; } } } Vector& rFluidNodeOrderedNeighbours=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); Vector& rSolidNodeOrderedNeighbours=itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER); if(rFluidNodeOrderedNeighbours.size() != fluidCounter) rFluidNodeOrderedNeighbours.resize(fluidCounter,false); if(rSolidNodeOrderedNeighbours.size() != solidCounter) rSolidNodeOrderedNeighbours.resize(solidCounter,false); noalias(rFluidNodeOrderedNeighbours)=ZeroVector(fluidCounter); noalias(rSolidNodeOrderedNeighbours)=ZeroVector(solidCounter); rFluidNodeOrderedNeighbours[0]=itNode->Id(); rSolidNodeOrderedNeighbours[0]=itNode->Id(); fluidCounter=0; solidCounter=0; if(neighbourNodes>1) { for(unsigned int k = 0; k< neighbourNodes-1; k++) { if(neighb_nodes[k].IsNot(SOLID) || neighb_nodes[k].FastGetSolutionStepValue(INTERFACE_NODE)==true){ fluidCounter+=1; rFluidNodeOrderedNeighbours[fluidCounter]=neighb_nodes[k].Id(); } 
if (neighb_nodes[k].Is(SOLID)) {
          solidCounter += 1;
          rSolidNodeOrderedNeighbours[solidCounter] = neighb_nodes[k].Id();
        }
      }
    }
    // +1: presumably accounts for the node itself in addition to its neighbours
    // (mirrors the "+1" used in InitializeNodalVariablesForSolidRemeshedDomain) — TODO confirm.
    fluidCounter += 1;
    solidCounter += 1;
    ModelPart& rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    // One block of 'dimension' shape-function-derivative entries per counted node.
    const unsigned int sizeFluidSDFNeigh = fluidCounter * dimension;
    const unsigned int sizeSolidSDFNeigh = solidCounter * dimension;
    Vector& rFluidNodalSFDneighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
    Vector& rSolidNodalSFDneighbours = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
    if (rFluidNodalSFDneighbours.size() != sizeFluidSDFNeigh)
      rFluidNodalSFDneighbours.resize(sizeFluidSDFNeigh, false);
    if (rSolidNodalSFDneighbours.size() != sizeSolidSDFNeigh)
      rSolidNodalSFDneighbours.resize(sizeSolidSDFNeigh, false);
    noalias(rFluidNodalSFDneighbours) = ZeroVector(sizeFluidSDFNeigh);
    noalias(rSolidNodalSFDneighbours) = ZeroVector(sizeSolidSDFNeigh);
    // rFluidNodalSFDneighbours.resize(sizeFluidSDFNeigh,true);
    // rSolidNodalSFDneighbours.resize(sizeSolidSDFNeigh,true);
    // std::cout<<"rFluidNodeOrderedNeighbours "<<rFluidNodeOrderedNeighbours<<std::endl;
    // std::cout<<"rSolidNodeOrderedNeighbours "<<rSolidNodeOrderedNeighbours<<std::endl;
    // std::cout<<"rFluidNodalSFDneighbours "<<rFluidNodalSFDneighbours<<std::endl;
    // std::cout<<"rSolidNodalSFDneighbours "<<rSolidNodalSFDneighbours<<std::endl;
  }

  // Resets every SOLID_* nodal solution-step variable of a node belonging to a
  // freshly remeshed solid region, resizing each container from the current
  // neighbourhood so that no stale data survives remeshing. Each variable is
  // only touched if it exists in the nodal database (SolutionStepsDataHas).
  void InitializeNodalVariablesForSolidRemeshedDomain(ModelPart::NodeIterator itNode)
  {
    ModelPart& rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    // Voigt strain size: 3*(dimension-1) -> 3 components in 2D, 6 in 3D.
    unsigned int sizeStrains = 3 * (dimension - 1);
    NodeWeakPtrVectorType& neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
    // Neighbours plus the node itself.
    unsigned int neighbourNodes = neighb_nodes.size() + 1;
    // 'dimension' shape-function-derivative entries per node of the patch.
    unsigned int sizeSDFNeigh = neighbourNodes * dimension;
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_CAUCHY_STRESS)) {
      Vector& rSolidNodalStress = itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS);
      if (rSolidNodalStress.size() != sizeStrains) rSolidNodalStress.resize(sizeStrains, false);
      noalias(rSolidNodalStress) = ZeroVector(sizeStrains);
    }
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS)) {
      Vector& rSolidNodalDevStress = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS);
      if (rSolidNodalDevStress.size() != sizeStrains) rSolidNodalDevStress.resize(sizeStrains, false);
      noalias(rSolidNodalDevStress) = ZeroVector(sizeStrains);
    }
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_SFD_NEIGHBOURS)) {
      Vector& rSolidNodalSFDneighbours = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
      if (rSolidNodalSFDneighbours.size() != sizeSDFNeigh) rSolidNodalSFDneighbours.resize(sizeSDFNeigh, false);
      noalias(rSolidNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);
    }
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_SFD_NEIGHBOURS_ORDER)) {
      Vector& rSolidNodalSFDneighboursOrder = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
      if (rSolidNodalSFDneighboursOrder.size() != neighbourNodes) rSolidNodalSFDneighboursOrder.resize(neighbourNodes, false);
      noalias(rSolidNodalSFDneighboursOrder) = ZeroVector(neighbourNodes);
    }
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_SPATIAL_DEF_RATE)) {
      Vector& rSolidSpatialDefRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
      if (rSolidSpatialDefRate.size() != sizeStrains) rSolidSpatialDefRate.resize(sizeStrains, false);
      noalias(rSolidSpatialDefRate) = ZeroVector(sizeStrains);
    }
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD)) {
      Matrix& rSolidFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
      if (rSolidFgrad.size1() != dimension) rSolidFgrad.resize(dimension, dimension, false);
      noalias(rSolidFgrad) = ZeroMatrix(dimension, dimension);
    }
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD_VEL)) {
      Matrix& rSolidFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
      if (rSolidFgradVel.size1() != dimension) rSolidFgradVel.resize(dimension, dimension, false);
      noalias(rSolidFgradVel) = ZeroMatrix(dimension, dimension);
    }
    // Scalar solid quantities are simply zeroed.
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_VOLUME)) {
      itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME) = 0;
    }
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_MEAN_MESH_SIZE)) {
      itNode->FastGetSolutionStepValue(SOLID_NODAL_MEAN_MESH_SIZE) = 0;
    }
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_FREESURFACE_AREA)) {
      itNode->FastGetSolutionStepValue(SOLID_NODAL_FREESURFACE_AREA) = 0;
    }
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_VOLUMETRIC_DEF_RATE)) {
      itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = 0;
    }
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_EQUIVALENT_STRAIN_RATE)) {
      itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = 0;
    }
  }

  // Loops over all nodes of the model part and computes the nodal deformation
  // gradients and strain/stress measures, dispatching to the interface / solid /
  // fluid variants depending on the node flags and on the fluid/solid nodal volumes.
  void CalcNodalStrainsAndStresses()
  {
    ModelPart& rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    // #pragma omp parallel
    // {
    ModelPart::NodeIterator NodesBegin;
    ModelPart::NodeIterator NodesEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
    for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
    {
      double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
      double solidNodalVolume = itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);
      // theta: presumably the time-integration weighting used in the deformation
      // gradient update routines — TODO confirm against their definitions.
      double theta = 0.5;
      if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true) {
        // Interface node: strains/stresses are computed twice, once with the
        // fluid containers and once with the solid containers (see below).
        if (nodalVolume > 0) {
          Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
          Vector rNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
          Matrix& interfaceFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
          Matrix& interfaceFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
          if (interfaceFgrad.size1() != dimension) interfaceFgrad.resize(dimension, dimension, false);
          if (interfaceFgradVel.size1() != dimension)
interfaceFgradVel.resize(dimension, dimension, false);
          noalias(interfaceFgrad) = ZeroMatrix(dimension, dimension);
          noalias(interfaceFgradVel) = ZeroMatrix(dimension, dimension);
          // I have to compute the stresses and strains two times because one time is for the solid and the other for the fluid
          // Matrix interfaceFgrad=ZeroMatrix(dimension,dimension);
          // Matrix interfaceFgradVel=ZeroMatrix(dimension,dimension);
          // the following function is more expensive than the general one because there is one loop more over neighbour nodes. This is why I do it here also for fluid interface nodes.
          ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, nodalSFDneighboursId, rNodalSFDneigh, theta, interfaceFgrad, interfaceFgradVel);
          // itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD)=interfaceFgrad;
          // itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL)=interfaceFgradVel;
          CalcNodalStrainsAndStressesForInterfaceFluidNode(itNode);
        }
        if (solidNodalVolume > 0) {
          // Solid side of the interface node: same procedure on the SOLID_* containers.
          Vector solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
          Vector rSolidNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
          Matrix& solidInterfaceFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
          Matrix& solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
          if (solidInterfaceFgrad.size1() != dimension) solidInterfaceFgrad.resize(dimension, dimension, false);
          if (solidInterfaceFgradVel.size1() != dimension) solidInterfaceFgradVel.resize(dimension, dimension, false);
          noalias(solidInterfaceFgrad) = ZeroMatrix(dimension, dimension);
          noalias(solidInterfaceFgradVel) = ZeroMatrix(dimension, dimension);
          // Matrix solidInterfaceFgrad=ZeroMatrix(dimension,dimension);
          // Matrix solidInterfaceFgradVel=ZeroMatrix(dimension,dimension);
          ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, solidNodalSFDneighboursId, rSolidNodalSFDneigh, theta, solidInterfaceFgrad, solidInterfaceFgradVel);
          // itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD)=solidInterfaceFgrad;
          // itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL)=solidInterfaceFgradVel;
          CalcNodalStrainsAndStressesForInterfaceSolidNode(itNode);
        }
      }
      else {
        // Non-interface node: exactly one of the solid/fluid paths is taken.
        if (itNode->Is(SOLID) && solidNodalVolume > 0) {
          ComputeAndStoreNodalDeformationGradientForSolidNode(itNode, theta);
          CalcNodalStrainsAndStressesForSolidNode(itNode);
        } else if (nodalVolume > 0) {
          this->ComputeAndStoreNodalDeformationGradient(itNode, theta);
          this->CalcNodalStrainsAndStressesForNode(itNode);
        }
      }
      // Node detached from both domains after remeshing: wipe its nodal history.
      if (nodalVolume == 0 && solidNodalVolume == 0) { // if nodalVolume==0
        this->InitializeNodalVariablesForRemeshedDomain(itNode);
        InitializeNodalVariablesForSolidRemeshedDomain(itNode);
      }
      // }
      // if(itNode->Is(SOLID) && itNode->FastGetSolutionStepValue(INTERFACE_NODE)==false){
      //   CopyValuesToSolidNonInterfaceNodes(itNode);
      // }
    }
    // }
    /* std::cout << "Calc Nodal Strains And Stresses DONE " << std::endl; */
  }

  // Copies the fluid (NODAL_*) gradient/strain/stress containers of a node into
  // their SOLID_* counterparts, resizing the targets first.
  // NOTE(review): the only call visible in this region is commented out above —
  // confirm whether this helper is still reachable elsewhere.
  void CopyValuesToSolidNonInterfaceNodes(ModelPart::NodeIterator itNode)
  {
    Vector& solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
    Vector& solidNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
    Matrix& solidInterfaceFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
    Matrix& solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
    Vector& solidSpatialDefRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
    double& volumetricDefRate = itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE);
    Vector& solidCauchyStress = itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS);
    Vector& solidDeviatoricCauchyStress = itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS);
    Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
    unsigned int sizeNodalSFDneighboursId = nodalSFDneighboursId.size();
solidNodalSFDneighboursId.resize(sizeNodalSFDneighboursId, false);
    Vector nodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
    unsigned int sizeNodalSFDneigh = nodalSFDneigh.size();
    solidNodalSFDneigh.resize(sizeNodalSFDneigh, false);
    // Copy the fluid-side containers into the solid-side ones.
    solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
    solidNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
    solidInterfaceFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
    solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
    solidSpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
    volumetricDefRate = itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE);
    solidCauchyStress = itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS);
    solidDeviatoricCauchyStress = itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS);
  }

  // Computes the nodal spatial deformation rate, the equivalent strain rate and
  // the total/deviatoric Cauchy stresses for the FLUID side of an FSI interface
  // node. Note that it reads and writes the SOLID_NODAL_* containers (which,
  // for interface nodes, hold the fluid-side data at this point of the step).
  // The volumetric coefficient is taken as DELTA_TIME * BULK_MODULUS and the
  // deviatoric one as DYNAMIC_VISCOSITY, regularized with the Papanastasiou
  // model when YIELD_SHEAR > 0.
  void CalcNodalStrainsAndStressesForInterfaceFluidNode(ModelPart::NodeIterator itNode)
  {
    ModelPart& rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    // double currFirstLame=itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT);
    // double deviatoricCoeff=itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT);
    double deviatoricCoeff = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY);
    double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
    if (yieldShear > 0) {
      // Papanastasiou regularization based on the previous equivalent strain rate.
      double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
      double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
      double exponent = -adaptiveExponent * equivalentStrainRate;
      if (equivalentStrainRate != 0) {
        deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
      }
      if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0) {
        // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
        deviatoricCoeff = adaptiveExponent * yieldShear;
      }
    }
    double currFirstLame = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS);
    Matrix Fgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
    Matrix FgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
    double detFgrad = 1.0;
    Matrix InvFgrad = ZeroMatrix(dimension, dimension);
    Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
    if (dimension == 2) {
      MathUtils< double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
    } else if (dimension == 3) {
      MathUtils< double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
    }
    // it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
    SpatialVelocityGrad = prod(FgradVel, InvFgrad);
    if (dimension == 2)
    {
      // Symmetric part of L in Voigt order [xx, yy, xy].
      itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
      itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
      itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
      // NOTE(review): this declaration shadows the outer 'yieldShear'.
      double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
      if (yieldShear > 0)
      {
        // Update the equivalent strain rate from the freshly computed rates and
        // re-evaluate the Papanastasiou-regularized viscosity with it.
        itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt((2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
                                                                                     2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
                                                                                     4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]));
        double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
        double equivalentStrainRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE);
        double exponent = -adaptiveExponent * equivalentStrainRate;
        if (equivalentStrainRate != 0) {
          deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
        }
        if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0) {
          // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
          deviatoricCoeff = adaptiveExponent * yieldShear;
        }
      }
      // Volumetric deformation rate = trace of the rate-of-deformation tensor.
      double DefVol = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
      itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
      double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
      double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
      double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
      double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
      double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
      double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
      // For solid-flagged nodes the increment is accumulated on the previous-step stress.
      if (itNode->Is(SOLID))
      {
        nodalSigmaTot_xx += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[0];
        nodalSigmaTot_yy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[1];
        nodalSigmaTot_xy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[2];
        nodalSigmaDev_xx += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[0];
        nodalSigmaDev_yy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[1];
        nodalSigmaDev_xy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[2];
      }
      itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[0] = nodalSigmaTot_xx;
      itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[1] = nodalSigmaTot_yy;
      itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[2] = nodalSigmaTot_xy;
      itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[0] = nodalSigmaDev_xx;
      itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[1] = nodalSigmaDev_yy;
      itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[2] = nodalSigmaDev_xy;
    } else if (dimension == 3)
    {
      // Symmetric part of L in Voigt order [xx, yy, zz, xy, xz, yz].
      itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
      itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
      itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2);
      itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
      itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
      itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
      // NOTE(review): this declaration shadows the outer 'yieldShear'.
      double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
      if (yieldShear > 0) {
        itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
                                                                                    2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
                                                                                    2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] +
                                                                                    4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] +
                                                                                    4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] +
                                                                                    4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] );
        double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
        double equivalentStrainRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE);
        double exponent = -adaptiveExponent * equivalentStrainRate;
        if (equivalentStrainRate != 0) {
          deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
        }
        if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0) {
          // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
          deviatoricCoeff = adaptiveExponent * yieldShear;
        }
      }
      // Volumetric deformation rate = trace of the rate-of-deformation tensor.
      double DefVol = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
      itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
      double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
      double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
      double nodalSigmaTot_zz = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
      double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3];
      double nodalSigmaTot_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4];
      double nodalSigmaTot_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5];
      double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
      double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
      double nodalSigmaDev_zz = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] - DefVol / 3.0);
      double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3];
      double nodalSigmaDev_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4];
      double nodalSigmaDev_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5];
      // For solid-flagged nodes the increment is accumulated on the previous-step stress.
      if (itNode->Is(SOLID))
      {
        nodalSigmaTot_xx += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[0];
        nodalSigmaTot_yy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[1];
        nodalSigmaTot_zz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[2];
        nodalSigmaTot_xy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[3];
        nodalSigmaTot_xz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[4];
        nodalSigmaTot_yz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[5];
        nodalSigmaDev_xx += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[0];
        nodalSigmaDev_yy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[1];
        nodalSigmaDev_zz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[2];
        nodalSigmaDev_xy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[3];
        nodalSigmaDev_xz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[4];
        nodalSigmaDev_yz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[5];
      }
      itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[0] = nodalSigmaTot_xx;
      itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[1] = nodalSigmaTot_yy;
      itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[2] = nodalSigmaTot_zz;
      itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[3] = nodalSigmaTot_xy;
      itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[4] = nodalSigmaTot_xz;
      itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[5] = nodalSigmaTot_yz;
      itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[0] = nodalSigmaDev_xx;
      itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[1] = nodalSigmaDev_yy;
      itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[2] = nodalSigmaDev_zz;
      itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[3] = nodalSigmaDev_xy;
      itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[4] = nodalSigmaDev_xz;
      itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[5] = nodalSigmaDev_yz;
    }
  }

  // Computes the nodal spatial deformation rate, equivalent strain rate and
  // total/deviatoric Cauchy stresses for the SOLID side of an FSI interface
  // node, using the VOLUMETRIC_COEFFICIENT / DEVIATORIC_COEFFICIENT material
  // parameters and the SOLID_NODAL_* deformation gradients.
  void CalcNodalStrainsAndStressesForInterfaceSolidNode(ModelPart::NodeIterator itNode)
  {
    ModelPart& rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    double currFirstLame = itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT);
    double deviatoricCoeff = itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT);
    Matrix Fgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
    Matrix FgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
    double detFgrad = 1.0;
    Matrix InvFgrad = ZeroMatrix(dimension, dimension);
    Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
    if (dimension == 2) {
      MathUtils< double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
    } else if (dimension == 3) {
      MathUtils< double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
    }
    // it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
    SpatialVelocityGrad = prod(FgradVel, InvFgrad);
    if (dimension == 2)
    {
      // Symmetric part of L in Voigt order [xx, yy, xy].
      itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
      itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
      itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
      double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
      if (yieldShear > 0)
      {
// (continues CalcNodalStrainsAndStressesForInterfaceSolidNode, 2D branch, inside if(yieldShear>0))
        // Update the equivalent strain rate and re-evaluate the
        // Papanastasiou-regularized deviatoric coefficient with it.
        itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt((2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
                                                                                     2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
                                                                                     4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]));
        double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
        double equivalentStrainRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE);
        double exponent = -adaptiveExponent * equivalentStrainRate;
        if (equivalentStrainRate != 0) {
          deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
        }
        if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0) {
          // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
          deviatoricCoeff = adaptiveExponent * yieldShear;
        }
      }
      // Volumetric deformation rate = trace of the rate-of-deformation tensor.
      double DefVol = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
      itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
      double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
      double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
      double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
      double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
      double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
      double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
      // For solid-flagged nodes the increment is accumulated on the previous-step stress.
      if (itNode->Is(SOLID))
      {
        nodalSigmaTot_xx += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[0];
        nodalSigmaTot_yy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[1];
        nodalSigmaTot_xy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[2];
        nodalSigmaDev_xx += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[0];
        nodalSigmaDev_yy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[1];
        nodalSigmaDev_xy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[2];
      }
      itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[0] = nodalSigmaTot_xx;
      itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[1] = nodalSigmaTot_yy;
      itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[2] = nodalSigmaTot_xy;
      itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[0] = nodalSigmaDev_xx;
      itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[1] = nodalSigmaDev_yy;
      itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[2] = nodalSigmaDev_xy;
    } else if (dimension == 3)
    {
      // Symmetric part of L in Voigt order [xx, yy, zz, xy, xz, yz].
      itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
      itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
      itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2);
      itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
      itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
      itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
      double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
      if (yieldShear > 0) {
        itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
                                                                                    2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
                                                                                    2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] +
                                                                                    4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] +
                                                                                    4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] +
                                                                                    4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] );
        double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
        double equivalentStrainRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE);
        double exponent = -adaptiveExponent * equivalentStrainRate;
        if (equivalentStrainRate != 0) {
          deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
        }
        if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0) {
          // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
          deviatoricCoeff = adaptiveExponent * yieldShear;
        }
      }
      // Volumetric deformation rate = trace of the rate-of-deformation tensor.
      double DefVol = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
      itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
      double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
      double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
      double nodalSigmaTot_zz = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
      double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3];
      double nodalSigmaTot_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4];
      double nodalSigmaTot_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5];
      double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
      double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
      double nodalSigmaDev_zz = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] - DefVol / 3.0);
      double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3];
      double nodalSigmaDev_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4];
      double nodalSigmaDev_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5];
      // For solid-flagged nodes the increment is accumulated on the previous-step stress.
      if (itNode->Is(SOLID))
      {
        nodalSigmaTot_xx += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[0];
        nodalSigmaTot_yy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[1];
        nodalSigmaTot_zz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[2];
        nodalSigmaTot_xy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[3];
        nodalSigmaTot_xz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[4];
        nodalSigmaTot_yz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[5];
        nodalSigmaDev_xx += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[0];
        nodalSigmaDev_yy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[1];
        nodalSigmaDev_zz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[2];
        nodalSigmaDev_xy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[3];
        nodalSigmaDev_xz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[4];
nodalSigmaDev_yz+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[5]; } itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,0)[0]=nodalSigmaTot_xx; itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,0)[1]=nodalSigmaTot_yy; itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,0)[2]=nodalSigmaTot_zz; itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,0)[3]=nodalSigmaTot_xy; itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,0)[4]=nodalSigmaTot_xz; itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,0)[5]=nodalSigmaTot_yz; itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,0)[0]=nodalSigmaDev_xx; itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,0)[1]=nodalSigmaDev_yy; itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,0)[2]=nodalSigmaDev_zz; itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,0)[3]=nodalSigmaDev_xy; itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,0)[4]=nodalSigmaDev_xz; itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,0)[5]=nodalSigmaDev_yz; } } void CalcNodalStrainsAndStressesForSolidNode(ModelPart::NodeIterator itNode) { ModelPart& rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); double currFirstLame=itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT); double deviatoricCoeff=itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT); Matrix Fgrad=itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD); Matrix FgradVel=itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL); double detFgrad=1.0; Matrix InvFgrad=ZeroMatrix(dimension,dimension); Matrix SpatialVelocityGrad=ZeroMatrix(dimension,dimension); if(dimension==2){ MathUtils< double>::InvertMatrix2(Fgrad,InvFgrad,detFgrad); }else if(dimension==3){ MathUtils< double>::InvertMatrix3(Fgrad,InvFgrad,detFgrad); } // if(itNode->Is(SOLID)){ // std::cout<<"solid 
node"<<std::endl; // } // if(itNode->Is(FLUID)){ // std::cout<<"FLUID node"<<std::endl; // } // if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){ // std::cout<<"currFirstLame "<<currFirstLame<<" deviatoricCoeff "<<deviatoricCoeff<<std::endl; // }else{ // std::cout<<"NOT INTERFACE currFirstLame "<<currFirstLame<<" deviatoricCoeff "<<deviatoricCoeff<<std::endl; // } //it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj SpatialVelocityGrad=prod(FgradVel,InvFgrad); if(dimension==2) { itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0]=SpatialVelocityGrad(0,0); itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1]=SpatialVelocityGrad(1,1); itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]=0.5*(SpatialVelocityGrad(1,0)+SpatialVelocityGrad(0,1)); double yieldShear=itNode->FastGetSolutionStepValue(YIELD_SHEAR); if(yieldShear>0) { itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE)=sqrt((2.0*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0]*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] + 2.0*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1]*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] + 4.0*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2])); double adaptiveExponent=itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT); double equivalentStrainRate=itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE); double exponent=-adaptiveExponent*equivalentStrainRate; if(equivalentStrainRate!=0){ deviatoricCoeff+=(yieldShear/equivalentStrainRate)*(1-exp(exponent)); } if(equivalentStrainRate<0.00001 && yieldShear!=0 && adaptiveExponent!=0){ // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield deviatoricCoeff=adaptiveExponent*yieldShear; } } double 
DefVol=itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0]+itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1]; itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE)=DefVol; double nodalSigmaTot_xx= currFirstLame*DefVol + 2.0*deviatoricCoeff*itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0]; double nodalSigmaTot_yy= currFirstLame*DefVol + 2.0*deviatoricCoeff*itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1]; double nodalSigmaTot_xy= 2.0*deviatoricCoeff*itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]; double nodalSigmaDev_xx= 2.0*deviatoricCoeff*(itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] - DefVol/3.0); double nodalSigmaDev_yy= 2.0*deviatoricCoeff*(itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] - DefVol/3.0); double nodalSigmaDev_xy= 2.0*deviatoricCoeff*itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]; if(itNode->Is(SOLID)) { nodalSigmaTot_xx+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[0]; nodalSigmaTot_yy+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[1]; nodalSigmaTot_xy+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[2]; nodalSigmaDev_xx+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[0]; nodalSigmaDev_yy+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[1]; nodalSigmaDev_xy+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[2]; } itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,0)[0]=nodalSigmaTot_xx; itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,0)[1]=nodalSigmaTot_yy; itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,0)[2]=nodalSigmaTot_xy; itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,0)[0]=nodalSigmaDev_xx; itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,0)[1]=nodalSigmaDev_yy; itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,0)[2]=nodalSigmaDev_xy; }else if (dimension==3) { 
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0]=SpatialVelocityGrad(0,0); itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1]=SpatialVelocityGrad(1,1); itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]=SpatialVelocityGrad(2,2); itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3]=0.5*(SpatialVelocityGrad(1,0)+SpatialVelocityGrad(0,1)); itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4]=0.5*(SpatialVelocityGrad(2,0)+SpatialVelocityGrad(0,2)); itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]=0.5*(SpatialVelocityGrad(2,1)+SpatialVelocityGrad(1,2)); double yieldShear=itNode->FastGetSolutionStepValue(YIELD_SHEAR); if(yieldShear>0){ itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE)=sqrt(2.0*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0]*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] + 2.0*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1]*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] + 2.0*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] + 4.0*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3]*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] + 4.0*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4]*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] + 4.0*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] ); double adaptiveExponent=itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT); double equivalentStrainRate=itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE); double exponent=-adaptiveExponent*equivalentStrainRate; if(equivalentStrainRate!=0){ deviatoricCoeff+=(yieldShear/equivalentStrainRate)*(1-exp(exponent)); } if(equivalentStrainRate<0.00001 && 
yieldShear!=0 && adaptiveExponent!=0){ // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield deviatoricCoeff=adaptiveExponent*yieldShear; } } double DefVol=itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]; itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE)=DefVol; double nodalSigmaTot_xx= currFirstLame*DefVol + 2.0*deviatoricCoeff*itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0]; double nodalSigmaTot_yy= currFirstLame*DefVol + 2.0*deviatoricCoeff*itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1]; double nodalSigmaTot_zz= currFirstLame*DefVol + 2.0*deviatoricCoeff*itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]; double nodalSigmaTot_xy= 2.0*deviatoricCoeff*itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3]; double nodalSigmaTot_xz= 2.0*deviatoricCoeff*itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4]; double nodalSigmaTot_yz= 2.0*deviatoricCoeff*itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]; double nodalSigmaDev_xx= 2.0*deviatoricCoeff*(itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] - DefVol/3.0); double nodalSigmaDev_yy= 2.0*deviatoricCoeff*(itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] - DefVol/3.0); double nodalSigmaDev_zz= 2.0*deviatoricCoeff*(itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] - DefVol/3.0); double nodalSigmaDev_xy= 2.0*deviatoricCoeff*itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3]; double nodalSigmaDev_xz= 2.0*deviatoricCoeff*itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4]; double nodalSigmaDev_yz= 2.0*deviatoricCoeff*itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]; if(itNode->Is(SOLID)) { nodalSigmaTot_xx+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[0]; 
nodalSigmaTot_yy+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[1]; nodalSigmaTot_zz+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[2]; nodalSigmaTot_xy+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[3]; nodalSigmaTot_xz+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[4]; nodalSigmaTot_yz+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[5]; nodalSigmaDev_xx+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[0]; nodalSigmaDev_yy+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[1]; nodalSigmaDev_zz+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[2]; nodalSigmaDev_xy+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[3]; nodalSigmaDev_xz+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[4]; nodalSigmaDev_yz+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[5]; } itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,0)[0]=nodalSigmaTot_xx; itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,0)[1]=nodalSigmaTot_yy; itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,0)[2]=nodalSigmaTot_zz; itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,0)[3]=nodalSigmaTot_xy; itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,0)[4]=nodalSigmaTot_xz; itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,0)[5]=nodalSigmaTot_yz; itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,0)[0]=nodalSigmaDev_xx; itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,0)[1]=nodalSigmaDev_yy; itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,0)[2]=nodalSigmaDev_zz; itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,0)[3]=nodalSigmaDev_xy; itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,0)[4]=nodalSigmaDev_xz; itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,0)[5]=nodalSigmaDev_yz; } } void 
CalcNodalStrainsForSolidNode(ModelPart::NodeIterator itNode)
{
    /* Computes the nodal spatial deformation-rate vector (Voigt packed), the
       equivalent strain rate and the volumetric deformation rate for a SOLID
       node, from the stored SOLID nodal deformation gradient F and its rate dF.
       NOTE(review): unlike the stress version above, the equivalent strain
       rate here is computed unconditionally (no yield-shear guard). */
    /* std::cout << "Calc Nodal Strains " << std::endl; */
    ModelPart& rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    // Matrix Fgrad=itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
    // Matrix FgradVel=itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
    // double detFgrad=1.0;
    // Matrix InvFgrad=ZeroMatrix(dimension,dimension);
    // Matrix SpatialVelocityGrad=ZeroMatrix(dimension,dimension);
    double detFgrad=1.0;
    Matrix nodalFgrad = ZeroMatrix(dimension,dimension);
    Matrix FgradVel = ZeroMatrix(dimension,dimension);
    Matrix InvFgrad = ZeroMatrix(dimension,dimension);
    Matrix SpatialVelocityGrad = ZeroMatrix(dimension,dimension);
    nodalFgrad=itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
    FgradVel=itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
    // Inverse of the nodal deformation gradient (2D/3D specialisation)
    if(dimension==2){
        MathUtils< double>::InvertMatrix2(nodalFgrad,InvFgrad,detFgrad);
    }else if(dimension==3){
        MathUtils< double>::InvertMatrix3(nodalFgrad,InvFgrad,detFgrad);
    }
    // it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
    SpatialVelocityGrad=prod(FgradVel,InvFgrad);
    if(dimension==2){
        // Symmetric part of L in Voigt order: [d_xx, d_yy, d_xy]
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0]=SpatialVelocityGrad(0,0);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1]=SpatialVelocityGrad(1,1);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]=0.5*(SpatialVelocityGrad(1,0)+SpatialVelocityGrad(0,1));
        // Equivalent strain rate = sqrt(2 d_ij d_ij): 2*dxx^2 + 2*dyy^2 + 4*dxy^2
        itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE)=sqrt((2.0*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0]*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
                                                                                   2.0*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1]*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
                                                                                   4.0*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]));
        double DefX=itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
        double DefY=itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
        double DefVol=DefX+DefY;  // trace of d = volumetric deformation rate
        itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE)=DefVol;
    }else if (dimension==3){
        // Voigt order in 3D: [d_xx, d_yy, d_zz, d_xy, d_xz, d_yz]
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0]=SpatialVelocityGrad(0,0);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1]=SpatialVelocityGrad(1,1);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]=SpatialVelocityGrad(2,2);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3]=0.5*(SpatialVelocityGrad(1,0)+SpatialVelocityGrad(0,1));
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4]=0.5*(SpatialVelocityGrad(2,0)+SpatialVelocityGrad(0,2));
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]=0.5*(SpatialVelocityGrad(2,1)+SpatialVelocityGrad(1,2));
        // Equivalent strain rate = sqrt(2 d_ij d_ij); factor 4 on the shear terms
        itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE)=sqrt(2.0*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0]*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
                                                                                  2.0*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1]*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
                                                                                  2.0*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] +
                                                                                  4.0*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3]*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] +
                                                                                  4.0*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4]*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] +
                                                                                  4.0*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] );
        double
DefX=itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
        double DefY=itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
        double DefZ=itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
        double DefVol=DefX+DefY+DefZ;  // trace of d = volumetric deformation rate
        itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE)=DefVol;
    }
}

/* Same strain computation as CalcNodalStrainsForSolidNode, but the nodal
   matrices are read directly by value (the interface gradients were just
   assembled by the caller); writes SOLID_NODAL_SPATIAL_DEF_RATE, the
   equivalent strain rate and the volumetric deformation rate. */
void CalcNodalStrainsForInterfaceSolidNode(ModelPart::NodeIterator itNode)
{
    /* std::cout << "Calc Nodal Strains " << std::endl; */
    ModelPart& rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    Matrix Fgrad=itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
    Matrix FgradVel=itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
    double detFgrad=1.0;
    Matrix InvFgrad=ZeroMatrix(dimension,dimension);
    Matrix SpatialVelocityGrad=ZeroMatrix(dimension,dimension);
    // Inverse of the nodal deformation gradient (2D/3D specialisation)
    if(dimension==2){
        MathUtils< double>::InvertMatrix2(Fgrad,InvFgrad,detFgrad);
    }else if(dimension==3){
        MathUtils< double>::InvertMatrix3(Fgrad,InvFgrad,detFgrad);
    }
    // it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
    SpatialVelocityGrad=prod(FgradVel,InvFgrad);
    if(dimension==2){
        // Symmetric part of L in Voigt order: [d_xx, d_yy, d_xy]
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0]=SpatialVelocityGrad(0,0);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1]=SpatialVelocityGrad(1,1);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]=0.5*(SpatialVelocityGrad(1,0)+SpatialVelocityGrad(0,1));
        // Equivalent strain rate = sqrt(2 d_ij d_ij)
        itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE)=sqrt((2.0*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0]*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
                                                                                   2.0*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1]*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
                                                                                   4.0*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]));
        double DefX=itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
        double DefY=itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
        double DefVol=DefX+DefY;  // trace of d = volumetric deformation rate
        itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE)=DefVol;
    }else if (dimension==3){
        // Voigt order in 3D: [d_xx, d_yy, d_zz, d_xy, d_xz, d_yz]
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0]=SpatialVelocityGrad(0,0);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1]=SpatialVelocityGrad(1,1);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]=SpatialVelocityGrad(2,2);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3]=0.5*(SpatialVelocityGrad(1,0)+SpatialVelocityGrad(0,1));
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4]=0.5*(SpatialVelocityGrad(2,0)+SpatialVelocityGrad(0,2));
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]=0.5*(SpatialVelocityGrad(2,1)+SpatialVelocityGrad(1,2));
        // Equivalent strain rate = sqrt(2 d_ij d_ij); factor 4 on the shear terms
        itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE)=sqrt(2.0*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0]*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
                                                                                  2.0*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1]*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
                                                                                  2.0*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] +
                                                                                  4.0*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3]*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] +
                                                                                  4.0*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4]*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] +
                                                                                  4.0*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]*itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] );
        double
DefX=itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
        double DefY=itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
        double DefZ=itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
        double DefVol=DefX+DefY+DefZ;  // trace of d = volumetric deformation rate
        itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE)=DefVol;
    }
    /* std::cout << "Calc Nodal Strains And Stresses DONE " << std::endl; */
}

/* Driver: for every node, assembles the appropriate nodal deformation
   gradients and computes the nodal strain measures. Interface nodes are
   processed twice (fluid side and solid side); pure solid/fluid nodes take
   their dedicated path; nodes with zero nodal volume are re-initialised
   (remeshed domain). */
void CalcNodalStrains()
{
    /* std::cout << "Calc Nodal Strains " << std::endl; */
    ModelPart& rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    // #pragma omp parallel
    // {
    ModelPart::NodeIterator NodesBegin;
    ModelPart::NodeIterator NodesEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd);
    for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
    {
        double nodalVolume=itNode->FastGetSolutionStepValue(NODAL_VOLUME);
        double solidNodalVolume=itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);
        double theta=1.0;  // time-integration weight passed to the gradient assembly
        if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){
            if(nodalVolume>0 ){
                // I have to compute the strains two times because one time is for the solid and the other for the fluid
                Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
                Vector rNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
                Matrix& interfaceFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
                Matrix& interfaceFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
                // Make sure the stored matrices have the right size before zeroing them
                if(interfaceFgrad.size1() != dimension)
                    interfaceFgrad.resize(dimension,dimension,false);
                if(interfaceFgradVel.size1() != dimension)
                    interfaceFgradVel.resize(dimension,dimension,false);
                noalias(interfaceFgrad)=ZeroMatrix(dimension,dimension);
                noalias(interfaceFgradVel)=ZeroMatrix(dimension,dimension);
                // Matrix interfaceFgrad = ZeroMatrix(dimension,dimension);
                // Matrix interfaceFgradVel = ZeroMatrix(dimension,dimension);
                // the following function is more expensive than the general one because there is one loop more over neighbour nodes. This is why I do it here also for fluid interface nodes.
                ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, nodalSFDneighboursId, rNodalSFDneigh, theta, interfaceFgrad, interfaceFgradVel);
                // itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD)=interfaceFgrad;
                // itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL)=interfaceFgradVel;
                this->CalcNodalStrainsForNode(itNode);
            }
            if(solidNodalVolume>0){
                // Second pass on the same interface node: solid-side variables
                Vector solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
                Vector rSolidNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
                Matrix& solidInterfaceFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
                Matrix& solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
                if(solidInterfaceFgrad.size1() != dimension)
                    solidInterfaceFgrad.resize(dimension,dimension,false);
                if(solidInterfaceFgradVel.size1() != dimension)
                    solidInterfaceFgradVel.resize(dimension,dimension,false);
                noalias(solidInterfaceFgrad)=ZeroMatrix(dimension,dimension);
                noalias(solidInterfaceFgradVel)=ZeroMatrix(dimension,dimension);
                // Matrix solidInterfaceFgrad = ZeroMatrix(dimension,dimension);
                // Matrix solidInterfaceFgradVel = ZeroMatrix(dimension,dimension);
                ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, solidNodalSFDneighboursId, rSolidNodalSFDneigh, theta, solidInterfaceFgrad, solidInterfaceFgradVel);
                // itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD)=solidInterfaceFgrad;
                // itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL)=solidInterfaceFgradVel;
                CalcNodalStrainsForInterfaceSolidNode(itNode);
            }
        }
        else{
            // Non-interface node: single pass, solid or fluid path
            if(itNode->Is(SOLID) && solidNodalVolume>0){
                ComputeAndStoreNodalDeformationGradientForSolidNode(itNode, theta);
                CalcNodalStrainsForSolidNode(itNode);
            }else if(nodalVolume>0){
this->ComputeAndStoreNodalDeformationGradient(itNode, theta);
                this->CalcNodalStrainsForNode(itNode);
            }
        }
        if(nodalVolume==0 && solidNodalVolume==0){ // if nodalVolume==0
            // Node not attached to any element volume (remeshed region): reset state
            this->InitializeNodalVariablesForRemeshedDomain(itNode);
            InitializeNodalVariablesForSolidRemeshedDomain(itNode);
        }
        // if(itNode->Is(SOLID) && itNode->FastGetSolutionStepValue(INTERFACE_NODE)==false){
        //   CopyValuesToSolidNonInterfaceNodes(itNode);
        // }
    }
    // }
    /* std::cout << "Calc Nodal Strains And Stresses DONE " << std::endl; */
}

/* Assembles the SOLID nodal deformation gradient F and its rate dF by
   accumulating shape-function derivative contributions (stored in
   SOLID_NODAL_SFD_NEIGHBOURS) from the node itself and its neighbours,
   then stores both on the node. `theta` blends current and previous-step
   velocities: v = v(n)*theta + v(n-1)*(1-theta). */
void ComputeAndStoreNodalDeformationGradientForSolidNode(ModelPart::NodeIterator itNode, double theta)
{
    KRATOS_TRY;

    ModelPart& rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
    Vector rNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
    /* unsigned int idThisNode=nodalSFDneighboursId[0]; */
    const unsigned int neighSize = nodalSFDneighboursId.size();
    Matrix Fgrad=ZeroMatrix(dimension,dimension);
    Matrix FgradVel=ZeroMatrix(dimension,dimension);
    NodeWeakPtrVectorType& neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);

    if(dimension==2)
    {
        // Contribution of the node itself (first two SFD entries)
        double dNdXi=rNodalSFDneigh[0];
        double dNdYi=rNodalSFDneigh[1];
        Fgrad(0,0)+=dNdXi*itNode->X();
        Fgrad(0,1)+=dNdYi*itNode->X();
        Fgrad(1,0)+=dNdXi*itNode->Y();
        Fgrad(1,1)+=dNdYi*itNode->Y();
        double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X,0)*theta + itNode->FastGetSolutionStepValue(VELOCITY_X,1)*(1-theta);
        double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y,0)*theta + itNode->FastGetSolutionStepValue(VELOCITY_Y,1)*(1-theta);
        FgradVel(0,0)+=dNdXi*VelocityX;
        FgradVel(0,1)+=dNdYi*VelocityX;
        FgradVel(1,0)+=dNdXi*VelocityY;
        FgradVel(1,1)+=dNdYi*VelocityY;
        unsigned int firstRow=2;  // offset of the first neighbour's SFD pair
        if(neighSize>0)
        {
            for (unsigned int i = 0; i< neighSize-1; i++) // neigh_nodes has one cell less than nodalSFDneighboursId because this has also the considered node ID at the beginning
            {
                dNdXi=rNodalSFDneigh[firstRow];
                dNdYi=rNodalSFDneigh[firstRow+1];
                // Sanity check: SFD ordering must match the neighbour-node list
                unsigned int neigh_nodes_id= neighb_nodes[i].Id();
                unsigned int other_neigh_nodes_id=nodalSFDneighboursId[i+1];
                if(neigh_nodes_id!=other_neigh_nodes_id)
                    std::cout<<"neigh_nodes_id "<<neigh_nodes_id<<" other_neigh_nodes_id "<<other_neigh_nodes_id<< std::endl;
                Fgrad(0,0)+=dNdXi*neighb_nodes[i].X();
                Fgrad(0,1)+=dNdYi*neighb_nodes[i].X();
                Fgrad(1,0)+=dNdXi*neighb_nodes[i].Y();
                Fgrad(1,1)+=dNdYi*neighb_nodes[i].Y();
                VelocityX = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X,0)*theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X,1)*(1-theta);
                VelocityY = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y,0)*theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y,1)*(1-theta);
                FgradVel(0,0)+=dNdXi*VelocityX;
                FgradVel(0,1)+=dNdYi*VelocityX;
                FgradVel(1,0)+=dNdXi*VelocityY;
                FgradVel(1,1)+=dNdYi*VelocityY;
                firstRow+=2;
            }
        }
    }else{
        // 3D case: three SFD entries per node
        double dNdXi=rNodalSFDneigh[0];
        double dNdYi=rNodalSFDneigh[1];
        double dNdZi=rNodalSFDneigh[2];
        double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X,0)*theta + itNode->FastGetSolutionStepValue(VELOCITY_X,1)*(1-theta);
        double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y,0)*theta + itNode->FastGetSolutionStepValue(VELOCITY_Y,1)*(1-theta);
        double VelocityZ = itNode->FastGetSolutionStepValue(VELOCITY_Z,0)*theta + itNode->FastGetSolutionStepValue(VELOCITY_Z,1)*(1-theta);
        Fgrad(0,0)+=dNdXi*itNode->X();
        Fgrad(0,1)+=dNdYi*itNode->X();
        Fgrad(0,2)+=dNdZi*itNode->X();
        Fgrad(1,0)+=dNdXi*itNode->Y();
        Fgrad(1,1)+=dNdYi*itNode->Y();
        Fgrad(1,2)+=dNdZi*itNode->Y();
        Fgrad(2,0)+=dNdXi*itNode->Z();
        Fgrad(2,1)+=dNdYi*itNode->Z();
        Fgrad(2,2)+=dNdZi*itNode->Z();
        FgradVel(0,0)+=dNdXi*VelocityX;
        FgradVel(0,1)+=dNdYi*VelocityX;
        FgradVel(0,2)+=dNdZi*VelocityX;
        FgradVel(1,0)+=dNdXi*VelocityY;
        FgradVel(1,1)+=dNdYi*VelocityY;
        FgradVel(1,2)+=dNdZi*VelocityY;
        FgradVel(2,0)+=dNdXi*VelocityZ;
        FgradVel(2,1)+=dNdYi*VelocityZ;
FgradVel(2,2)+=dNdZi*VelocityZ;
        unsigned int firstRow=3;  // offset of the first neighbour's SFD triple
        if(neighSize>0){
            for (unsigned int i = 0; i< neighSize-1; i++)
            {
                dNdXi=rNodalSFDneigh[firstRow];
                dNdYi=rNodalSFDneigh[firstRow+1];
                dNdZi=rNodalSFDneigh[firstRow+2];
                VelocityX = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X,0)*theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X,1)*(1-theta);
                VelocityY = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y,0)*theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y,1)*(1-theta);
                VelocityZ = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Z,0)*theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Z,1)*(1-theta);
                Fgrad(0,0)+=dNdXi*neighb_nodes[i].X();
                Fgrad(0,1)+=dNdYi*neighb_nodes[i].X();
                Fgrad(0,2)+=dNdZi*neighb_nodes[i].X();
                Fgrad(1,0)+=dNdXi*neighb_nodes[i].Y();
                Fgrad(1,1)+=dNdYi*neighb_nodes[i].Y();
                Fgrad(1,2)+=dNdZi*neighb_nodes[i].Y();
                Fgrad(2,0)+=dNdXi*neighb_nodes[i].Z();
                Fgrad(2,1)+=dNdYi*neighb_nodes[i].Z();
                Fgrad(2,2)+=dNdZi*neighb_nodes[i].Z();
                FgradVel(0,0)+=dNdXi*VelocityX;
                FgradVel(0,1)+=dNdYi*VelocityX;
                FgradVel(0,2)+=dNdZi*VelocityX;
                FgradVel(1,0)+=dNdXi*VelocityY;
                FgradVel(1,1)+=dNdYi*VelocityY;
                FgradVel(1,2)+=dNdZi*VelocityY;
                FgradVel(2,0)+=dNdXi*VelocityZ;
                FgradVel(2,1)+=dNdYi*VelocityZ;
                FgradVel(2,2)+=dNdZi*VelocityZ;
                firstRow+=3;
            }
        }
    }
    // Store the assembled gradients on the node (solid-side variables)
    itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD)=Fgrad;
    itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL)=FgradVel;

    KRATOS_CATCH("");
}

/* Interface-node variant of the gradient assembly: the SFD coefficient list
   and the output matrices are passed in explicitly, and each SFD entry is
   matched to its neighbour node by ID (inner loop with break) instead of by
   position, which makes it more expensive than the positional version. */
void ComputeAndStoreNodalDeformationGradientForInterfaceNode(ModelPart::NodeIterator itNode, Vector nodalSFDneighboursId, Vector rNodalSFDneigh, double theta, Matrix& Fgrad, Matrix& FgradVel){

    KRATOS_TRY;

    ModelPart& rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    /* unsigned int idThisNode=nodalSFDneighboursId[0]; */
    const unsigned int neighSize = nodalSFDneighboursId.size();
    noalias(Fgrad) = ZeroMatrix(dimension,dimension);
noalias(FgradVel) = ZeroMatrix(dimension,dimension);
    NodeWeakPtrVectorType& neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
    const unsigned int neighNodesSize = neighb_nodes.size();

    if(dimension==2)
    {
        // Contribution of the node itself (first two SFD entries)
        double dNdXi=rNodalSFDneigh[0];
        double dNdYi=rNodalSFDneigh[1];
        Fgrad(0,0)+=dNdXi*itNode->X();
        Fgrad(0,1)+=dNdYi*itNode->X();
        Fgrad(1,0)+=dNdXi*itNode->Y();
        Fgrad(1,1)+=dNdYi*itNode->Y();
        double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X,0)*theta + itNode->FastGetSolutionStepValue(VELOCITY_X,1)*(1-theta);
        double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y,0)*theta + itNode->FastGetSolutionStepValue(VELOCITY_Y,1)*(1-theta);
        FgradVel(0,0)+=dNdXi*VelocityX;
        FgradVel(0,1)+=dNdYi*VelocityX;
        FgradVel(1,0)+=dNdXi*VelocityY;
        FgradVel(1,1)+=dNdYi*VelocityY;
        unsigned int firstRow=2;
        if(neighSize>0)
        {
            for (unsigned int i = 0; i< neighSize-1; i++) // neigh_nodes has one cell less than nodalSFDneighboursId because this has also the considered node ID at the beginning
            {
                unsigned int other_neigh_nodes_id=nodalSFDneighboursId[i+1];
                // Find the neighbour node matching this SFD entry by ID
                for(unsigned int k = 0; k< neighNodesSize; k++)
                {
                    unsigned int neigh_nodes_id= neighb_nodes[k].Id();
                    if(neigh_nodes_id==other_neigh_nodes_id){
                        dNdXi=rNodalSFDneigh[firstRow];
                        dNdYi=rNodalSFDneigh[firstRow+1];
                        Fgrad(0,0)+=dNdXi*neighb_nodes[k].X();
                        Fgrad(0,1)+=dNdYi*neighb_nodes[k].X();
                        Fgrad(1,0)+=dNdXi*neighb_nodes[k].Y();
                        Fgrad(1,1)+=dNdYi*neighb_nodes[k].Y();
                        VelocityX = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X,0)*theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X,1)*(1-theta);
                        VelocityY = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y,0)*theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y,1)*(1-theta);
                        FgradVel(0,0)+=dNdXi*VelocityX;
                        FgradVel(0,1)+=dNdYi*VelocityX;
                        FgradVel(1,0)+=dNdXi*VelocityY;
                        FgradVel(1,1)+=dNdYi*VelocityY;
                        firstRow+=2;
                        break;
                    }
                }
            }
        }
    }else{
        // 3D case: three SFD entries per node
        double dNdXi=rNodalSFDneigh[0];
        double dNdYi=rNodalSFDneigh[1];
        double dNdZi=rNodalSFDneigh[2];
        double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X,0)*theta + itNode->FastGetSolutionStepValue(VELOCITY_X,1)*(1-theta);
        double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y,0)*theta + itNode->FastGetSolutionStepValue(VELOCITY_Y,1)*(1-theta);
        double VelocityZ = itNode->FastGetSolutionStepValue(VELOCITY_Z,0)*theta + itNode->FastGetSolutionStepValue(VELOCITY_Z,1)*(1-theta);
        Fgrad(0,0)+=dNdXi*itNode->X();
        Fgrad(0,1)+=dNdYi*itNode->X();
        Fgrad(0,2)+=dNdZi*itNode->X();
        Fgrad(1,0)+=dNdXi*itNode->Y();
        Fgrad(1,1)+=dNdYi*itNode->Y();
        Fgrad(1,2)+=dNdZi*itNode->Y();
        Fgrad(2,0)+=dNdXi*itNode->Z();
        Fgrad(2,1)+=dNdYi*itNode->Z();
        Fgrad(2,2)+=dNdZi*itNode->Z();
        FgradVel(0,0)+=dNdXi*VelocityX;
        FgradVel(0,1)+=dNdYi*VelocityX;
        FgradVel(0,2)+=dNdZi*VelocityX;
        FgradVel(1,0)+=dNdXi*VelocityY;
        FgradVel(1,1)+=dNdYi*VelocityY;
        FgradVel(1,2)+=dNdZi*VelocityY;
        FgradVel(2,0)+=dNdXi*VelocityZ;
        FgradVel(2,1)+=dNdYi*VelocityZ;
        FgradVel(2,2)+=dNdZi*VelocityZ;
        unsigned int firstRow=3;
        if(neighSize>0){
            for (unsigned int i = 0; i< neighSize-1; i++)
            {
                unsigned int other_neigh_nodes_id=nodalSFDneighboursId[i+1];
                // Find the neighbour node matching this SFD entry by ID
                for(unsigned int k = 0; k< neighNodesSize; k++)
                {
                    unsigned int neigh_nodes_id= neighb_nodes[k].Id();
                    if(neigh_nodes_id==other_neigh_nodes_id)
                    {
                        dNdXi=rNodalSFDneigh[firstRow];
                        dNdYi=rNodalSFDneigh[firstRow+1];
                        dNdZi=rNodalSFDneigh[firstRow+2];
                        VelocityX = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X,0)*theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X,1)*(1-theta);
                        VelocityY = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y,0)*theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y,1)*(1-theta);
                        VelocityZ = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Z,0)*theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Z,1)*(1-theta);
                        Fgrad(0,0)+=dNdXi*neighb_nodes[k].X();
                        Fgrad(0,1)+=dNdYi*neighb_nodes[k].X();
                        Fgrad(0,2)+=dNdZi*neighb_nodes[k].X();
                        Fgrad(1,0)+=dNdXi*neighb_nodes[k].Y();
                        Fgrad(1,1)+=dNdYi*neighb_nodes[k].Y();
                        Fgrad(1,2)+=dNdZi*neighb_nodes[k].Y();
                        Fgrad(2,0)+=dNdXi*neighb_nodes[k].Z();
                        Fgrad(2,1)+=dNdYi*neighb_nodes[k].Z();
                        Fgrad(2,2)+=dNdZi*neighb_nodes[k].Z();
                        FgradVel(0,0)+=dNdXi*VelocityX;
                        FgradVel(0,1)+=dNdYi*VelocityX;
                        FgradVel(0,2)+=dNdZi*VelocityX;
                        FgradVel(1,0)+=dNdXi*VelocityY;
                        FgradVel(1,1)+=dNdYi*VelocityY;
                        FgradVel(1,2)+=dNdZi*VelocityY;
                        FgradVel(2,0)+=dNdXi*VelocityZ;
                        FgradVel(2,1)+=dNdYi*VelocityZ;
                        FgradVel(2,2)+=dNdZi*VelocityZ;
                        firstRow+=3;
                        break;
                    }
                }
            }
        }
    }
    // Results are returned through the Fgrad/FgradVel reference parameters;
    // the caller decides which nodal variable (fluid or solid) they land in.
    // itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD)=Fgrad;
    // itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL)=FgradVel;

    KRATOS_CATCH("");
}

/* Moves the mesh to the configuration implied by the midpoint displacement
   update, resets the nodal assembly variables and recomputes the weighted
   boundary normals. */
void UpdateTopology(ModelPart& rModelPart, unsigned int echoLevel)
{
    KRATOS_TRY;

    std::cout<<" UpdateTopology ..."<<std::endl;
    /* this->CalculateDisplacements(); */
    CalculateDisplacementsAndResetNodalVariables();
    BaseType::MoveMesh();
    BoundaryNormalsCalculationUtilities BoundaryComputation;
    BoundaryComputation.CalculateWeightedBoundaryNormals(rModelPart, echoLevel);
    std::cout<<" UpdateTopology DONE"<<std::endl;

    KRATOS_CATCH("");
}

/* Midpoint (trapezoidal) displacement update d(n+1) = d(n) + dt/2*(v(n+1)+v(n))
   for every node, followed by zeroing of all fluid- and solid-side nodal
   assembly variables so the next step can re-accumulate them. */
void CalculateDisplacementsAndResetNodalVariables()
{
    ModelPart& rModelPart = BaseType::GetModelPart();
    ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double TimeStep = rCurrentProcessInfo[DELTA_TIME];
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    unsigned int sizeStrains=3*(dimension-1);  // 3 Voigt components in 2D, 6 in 3D
    // #pragma omp parallel
    // {
    ModelPart::NodeIterator NodesBegin;
    ModelPart::NodeIterator NodesEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd);
    for (ModelPart::NodeIterator i = NodesBegin; i != NodesEnd; ++i)
    {
        array_1d<double, 3 > & CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
        array_1d<double, 3 > & PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
        array_1d<double, 3 > & CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0);
        array_1d<double, 3 > & PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1);
CurrentDisplacement[0] = 0.5* TimeStep *(CurrentVelocity[0]+PreviousVelocity[0]) + PreviousDisplacement[0];
        CurrentDisplacement[1] = 0.5* TimeStep *(CurrentVelocity[1]+PreviousVelocity[1]) + PreviousDisplacement[1];
        if(dimension==3){
            CurrentDisplacement[2] = 0.5* TimeStep *(CurrentVelocity[2]+PreviousVelocity[2]) + PreviousDisplacement[2];
        }

        ///// reset Nodal variables //////
        // Fluid-side nodal assembly variables
        Vector& rNodalSFDneighbours=i->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
        unsigned int sizeSDFNeigh=rNodalSFDneighbours.size();
        // unsigned int neighbourNodes=i->GetValue(NEIGHBOUR_NODES).size()+1;
        // unsigned int sizeSDFNeigh=neighbourNodes*dimension;
        i->FastGetSolutionStepValue(NODAL_VOLUME)=0;
        i->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE)=0;
        i->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA)=0;
        i->FastGetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE)=0;
        i->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE)=0;
        noalias(rNodalSFDneighbours)=ZeroVector(sizeSDFNeigh);
        Vector& rSpatialDefRate=i->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
        noalias(rSpatialDefRate)=ZeroVector(sizeStrains);
        Matrix& rFgrad=i->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
        noalias(rFgrad)=ZeroMatrix(dimension,dimension);
        Matrix& rFgradVel=i->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
        noalias(rFgradVel)=ZeroMatrix(dimension,dimension);

        // if(i->FastGetSolutionStepValue(INTERFACE_NODE)==true){
        // Solid-side nodal assembly variables (reset unconditionally)
        Vector& rSolidNodalSFDneighbours=i->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
        unsigned int solidSizeSDFNeigh=rSolidNodalSFDneighbours.size();
        // unsigned int solidSizeSDFNeigh=solidNeighbourNodes*dimension;
        i->FastGetSolutionStepValue(SOLID_NODAL_VOLUME)=0;
        i->FastGetSolutionStepValue(SOLID_NODAL_MEAN_MESH_SIZE)=0;
        i->FastGetSolutionStepValue(SOLID_NODAL_FREESURFACE_AREA)=0;
        i->FastGetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE)=0;
        i->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE)=0;
        noalias(rSolidNodalSFDneighbours)=ZeroVector(solidSizeSDFNeigh);
        Vector& rSolidSpatialDefRate=i->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
        noalias(rSolidSpatialDefRate)=ZeroVector(sizeStrains);
        Matrix& rSolidFgrad=i->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
        noalias(rSolidFgrad)=ZeroMatrix(dimension,dimension);
        Matrix& rSolidFgradVel=i->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
        noalias(rSolidFgradVel)=ZeroMatrix(dimension,dimension);
        // }
    }
    // }
}

/// Turn back information as a string.
std::string Info() const override
{
    std::stringstream buffer;
    buffer << "NodalTwoStepVPStrategyForFSI" ;
    return buffer.str();
}

/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
    rOStream << "NodalTwoStepVPStrategyForFSI";
}

// /// Print object's data.
// void PrintData(std::ostream& rOStream) const override
// {
// }

///@}
///@name Friends
///@{

///@}

protected:
///@name Protected Life Cycle
///@{

///@}
///@name Protected static Member Variables
///@{

///@}
///@name Protected member Variables
///@{

///@}
///@name Protected Operators
///@{

///@}
///@name Protected Operations
///@{

///@}
///@name Private Access
///@{

///@}
///@name Private Inquiry
///@{

///@}
///@name Un accessible methods
///@{

/// Assignment operator.
/// NOTE(review): declared to return a reference but the body returns nothing
/// (copying is effectively disabled; consider `= delete`) — confirm intent.
NodalTwoStepVPStrategyForFSI& operator=(NodalTwoStepVPStrategyForFSI const& rOther){}

/// Copy constructor (intentionally empty: copying is not supported).
NodalTwoStepVPStrategyForFSI(NodalTwoStepVPStrategyForFSI const& rOther){}

///@}

}; /// Class NodalTwoStepVPStrategyForFSI

///@}
///@name Type Definitions
///@{

///@}

///@} // addtogroup

} // namespace Kratos.

#endif // KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_H
perSentenceStats.h
/**
 * @author Samuel Larkin
 * @file eval/perSentenceStats.h
 * @brief A scoring metric wrap to perform on sentence level scoring.
 *
 *
 * Technologies langagieres interactives / Interactive Language Technologies
 * Inst. de technologie de l'information / Institute for Information Technology
 * Conseil national de recherches Canada / National Research Council Canada
 * Copyright 2011, Sa Majeste la Reine du Chef du Canada /
 * Copyright 2011, Her Majesty in Right of Canada
 */

#ifndef __PER_SENTENCE_STATS_H__
#define __PER_SENTENCE_STATS_H__

#include "basic_data_structure.h"

namespace Portage {

/**
 * Wraps any sentence-level metric (ScoreMetric) so that a corpus score is the
 * arithmetic mean of the per-sentence display scores.  `total` accumulates the
 * sum of per-sentence scores and `count` the number of sentences; the usual
 * +=/-= statistics algebra then yields averaged scores via score().
 */
template <class ScoreMetric>
class perSentenceStats {
   public:
      double total;   ///< Sum of per-sentence display scores accumulated so far.
      Uint   count;   ///< Number of sentences accumulated so far.

   public:
      /// Default constructor: empty statistics (score() == 0).
      perSentenceStats()
      : total(0.0f)
      , count(0)
      {}

      /// Construct the statistics for a single sentence.
      /// @param trans  translation to score
      /// @param refs   reference translations
      perSentenceStats(const Translation& trans, const References& refs)
      : total(0.0f)
      , count(0)
      {
         init(trans, refs);
      }

      /// (Re)initialize this object with the score of one sentence.
      /// Scores trans against refs with ScoreMetric and stores the display
      /// value; count becomes 1 so this object represents exactly one sentence.
      /// @param trans  translation to score
      /// @param refs   reference translations
      void init(const Translation& trans, const References& refs) {
         total = ScoreMetric::convertToDisplay(ScoreMetric(trans, refs).score());
         count = 1;
      }

      /// What is this metric's name.
      /// @return Returns "perSentenceStats<X>" where X is the wrapped metric's name.
      static const char* const name() {
         static const string name(string("perSentenceStats<") + ScoreMetric::name() + ">");
         return name.c_str();
      }

      /**
       * Identity conversion: this wrapper already stores display values.
       * @param value internal value (eg, from score())
       * @return display value.
       */
      static double convertToDisplay(double value) {
         return value;
      }

      /**
       * Identity conversion: this wrapper already stores display values.
       * @param value display value
       * @return internal value.
       */
      static double convertFromDisplay(double value) {
         return value;
      }

      /**
       * Convert "internal" score value to pnorm format: in [0,1],
       * higher scores are better.  For this wrapper it is simply
       * convertToDisplay(); whether the result lies in [0,1] depends on the
       * wrapped ScoreMetric (true for BLEU-like metrics, not for WER/PER).
       * @param value internal value (eg, from score())
       * @return pnorm value
       */
      static double convertToPnorm(double value) {
         return convertToDisplay(value);
      }

      /**
       * Convert "internal" score value from pnorm format: in [0,1],
       * higher scores are better.  For this wrapper it is simply
       * convertFromDisplay(); see convertToPnorm() for the caveat about the
       * wrapped metric's range.
       * @param value pnorm value
       * @return internal value
       */
      static double convertFromPnorm(double value) {
         return convertFromDisplay(value);
      }

      /// Mean per-sentence score, or 0 when no sentences were accumulated.
      double score() const {
         return (count > 0 ? total / count : 0.0f);
      }

      /**
       * Prints the accumulated total, sentence count and mean score in a
       * human readable format to out.
       * @param out output stream defaults to cout.
       */
      void output(ostream &out = cout) const {
         out << "total: " << total << endl;
         out << "count: " << count << endl;
         out << "Score: " << score() << endl;
      }

      /**
       * Prints the accumulated total and sentence count so that it can be reread.
       * @param out output stream mainly a file.
       */
      void write(ostream &out) const {
         out << total << "\t" << count << endl;
      }

      /**
       * Finds the difference in statistics between two perSentenceStats objects.
       * @relates perSentenceStats
       * @param other right-hand side operand
       * @return Returns a perSentenceStats containing this - other
       */
      perSentenceStats<ScoreMetric>& operator-=(const perSentenceStats<ScoreMetric>& other) {
         total -= other.total;
         count -= other.count;
         return *this;
      }

      /**
       * Adds together the statistics of two perSentenceStats objects, returning
       * the result.
       * @relates perSentenceStats
       * @param other right-hand side operand
       * @return Returns a perSentenceStats containing this + other
       */
      perSentenceStats<ScoreMetric>& operator+=(const perSentenceStats<ScoreMetric>& other) {
         total += other.total;
         count += other.count;
         return *this;
      }

      /// Callable entity for bootstrap confidence interval.
      struct CIcomputer {
         /// Define what is an iterator for a CIcomputer.
         typedef typename vector<perSentenceStats<ScoreMetric> >::const_iterator iterator;

         /**
          * Cumulates all perSentenceStats from the range.
          * @param begin start iterator
          * @param end end iterator
          * @return Returns the mean per-sentence score once all statistics in
          *         the range are accumulated.
          */
         double operator()(iterator begin, iterator end) {
            perSentenceStats<ScoreMetric> total;
            return std::accumulate(begin, end, total).score();
         }
      };
};

/// Difference of two statistics objects (s1 - s2).
template <class ScoreMetric>
perSentenceStats<ScoreMetric> operator-(const perSentenceStats<ScoreMetric>& s1,
                                        const perSentenceStats<ScoreMetric>& s2)
{
   perSentenceStats<ScoreMetric> result(s1);
   result -= s2;
   return result;
}

/// Sum of two statistics objects (s1 + s2).
template <class ScoreMetric>
perSentenceStats<ScoreMetric> operator+(const perSentenceStats<ScoreMetric>& s1,
                                        const perSentenceStats<ScoreMetric>& s2)
{
   perSentenceStats<ScoreMetric> result(s1);
   result += s2;
   return result;
}

/// Equality: exact match of both accumulated fields.
template <class ScoreMetric>
bool operator==(const perSentenceStats<ScoreMetric>& s1,
                const perSentenceStats<ScoreMetric>& s2)
{
   return s1.total == s2.total && s1.count == s2.count;
}

/// Inequality, defined in terms of operator==.
template <class ScoreMetric>
bool operator!=(const perSentenceStats<ScoreMetric>& s1,
                const perSentenceStats<ScoreMetric>& s2)
{
   return !(s1 == s2);
}

/**
 * Scale ScoreMetric by a constant.
 * NOTE(review): mutates its argument in place and scales only `total`,
 * leaving `count` untouched — confirm this weighting semantics is intended
 * by the callers (e.g. Powell-style weighted sums).
 */
template <class ScoreMetric>
perSentenceStats<ScoreMetric> operator*(perSentenceStats<ScoreMetric> &s, double c)
{
   s.total *= c;
   return s;
}

/**
 * Fills `scores` with the per-sentence statistics of the first
 * min(max, nbest.size()) hypotheses of an n-best list, scored in parallel
 * with OpenMP.
 * @param scores  output vector, resized to K = min(max, nbest.size())
 * @param nbest   n-best hypotheses to score
 * @param refs    reference translations
 * @param max     maximum number of hypotheses to score
 */
template <class ScoreMetric>
void computeArrayRow(vector<perSentenceStats<ScoreMetric> >& scores,
                     const Nbest& nbest,
                     const References& refs,
                     Uint max)
{
   const Uint K = min(max, nbest.size());
   scores.resize(K);
   // Tokenize hypotheses and references once, up front, against a shared
   // vocabulary so the parallel loop below only reads shared state.
   Voc voc;
   vector<vector<Uint> > nbest_uint;
   tokenize(nbest, voc, nbest_uint);
   vector<vector<Uint> > refs_uint;
   tokenize(refs, voc, refs_uint);
   int k;
#pragma omp parallel for private(k)
   for (k=0; k<(int)K; ++k) {
      scores[k].init(nbest_uint[k], refs_uint);
   }
}

} // ends namespace Portage

#endif // __PER_SENTENCE_STATS_H__
jacobi-omp.c
#include <stdio.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif

// Add timing support
#include <sys/time.h>

/* Wall-clock timestamp in seconds (microsecond resolution). */
double time_stamp()
{
  struct timeval t;
  double time;
  gettimeofday(&t, NULL);
  time = t.tv_sec + 1.0e-6*t.tv_usec;
  return time;
}

double time1, time2;

void driver(void);
void initialize(void);
void jacobi(void);
void error_check(void);

/************************************************************
* program to solve a finite difference
* discretization of Helmholtz equation :
* (d2/dx2)u + (d2/dy2)u - alpha u = f
* using Jacobi iterative method.
*
* Modified: Sanjiv Shah,       Kuck and Associates, Inc. (KAI), 1998
* Author:   Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
* This c version program is translated by
* Chunhua Liao, University of Houston, Jan, 2005
*
* Directives are used in this code to achieve paralleism.
* All do loops are parallized with default 'static' scheduling.
*
* Input :  n - grid dimension in x direction
*          m - grid dimension in y direction
*          alpha - Helmholtz constant (always greater than 0.0)
*          tol   - error tolerance for iterative solver
*          relax - Successice over relaxation parameter
*          mits  - Maximum iterations for iterative solver
*
* On output
*       : u(n,m) - Dependent variable (solutions)
*       : f(n,m) - Right hand side function
*************************************************************/

#define MSIZE 500
int n,m,mits;
double tol,relax=1.0,alpha=0.0543;
double u[MSIZE][MSIZE],f[MSIZE][MSIZE],uold[MSIZE][MSIZE];
double dx,dy;

/* Entry point: sets problem parameters (fixed-size grid, hard-coded
 * tolerance/iteration limit — the interactive input below is kept for
 * reference) and runs the solver driver. */
int main (void)
{
/*
      printf("Input n,m (< %d) - grid dimension in x,y direction:\n",MSIZE);
      scanf ("%d",&n);
      scanf ("%d",&m);
      printf("Input tol - error tolerance for iterative solver\n");
      scanf("%f",&toler);
      tol=(double)toler;
      printf("Input mits - Maximum iterations for solver\n");
      scanf("%d",&mits);
*/
  n=MSIZE;
  m=MSIZE;
  tol=0.0000000001;
  mits=5000;
#ifdef _OPENMP
#pragma omp parallel
  {
#pragma omp single
    printf("Running using %d threads...\n",omp_get_num_threads());
  }
#endif
  driver ( ) ;
  return 0;
}

/*************************************************************
* Subroutine driver ()
* This is where the arrays are allocated and initialzed.
*
* Working varaibles/arrays
*     dx  - grid spacing in x direction
*     dy  - grid spacing in y direction
*************************************************************/
void driver( )
{
  initialize();

  time1 = time_stamp();
  /* Solve Helmholtz equation */
  jacobi ();
  time2 = time_stamp();

  printf("------------------------\n");
  printf("Execution time = %f\n",time2-time1);
  /* error_check (n,m,alpha,dx,dy,u,f) */
  error_check ( );
}

/*      subroutine initialize (n,m,alpha,dx,dy,u,f)
******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
void initialize( )
{
  int i,j;
  double xx,yy;
  //double PI=3.1415926;

  dx = 2.0 / (n-1);
  dy = 2.0 / (m-1);

  /* Initilize initial condition and RHS */
#pragma omp parallel for private(xx,yy,j,i)
  for (i=0;i<n;i++)
    for (j=0;j<m;j++)
    {
      /* BUG FIX: xx and yy were declared int and computed with explicit
       * (int) casts, truncating every grid coordinate to -1 or 0 and
       * corrupting the RHS f.  error_check() computes the same coordinates
       * as double, which is clearly the intended type.
       *
       * NOTE(review): the (i-1)/(j-1) offsets look like a leftover from the
       * 1-based Fortran original (the grid then spans [-1-dx, 1-dx] instead
       * of [-1, 1]).  Kept as-is because error_check() uses the identical
       * formula — confirm whether dx*i / dy*j was intended in both places.
       */
      xx = -1.0 + dx * (i-1);
      yy = -1.0 + dy * (j-1);
      u[i][j] = 0.0;
      f[i][j] = -1.0*alpha *(1.0-xx*xx)*(1.0-yy*yy)\
                - 2.0*(1.0-xx*xx)-2.0*(1.0-yy*yy);
    }
}

/*      subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit)
******************************************************************
* Subroutine HelmholtzJ
* Solves poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlect boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m   Number of grid points in the X/Y directions
*         dx,dy Grid spacing in the X/Y directions
*         alpha Helmholtz eqn. coefficient
*         omega Relaxation factor
*         f(n,m) Right hand side function
*         u(n,m) Dependent variable/Solution
*         tol    Tolerance for iterative solver
*         maxit  Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************/
void jacobi( )
{
  double omega;
  int i,j,k;
  double error,resid,ax,ay,b;

  omega=relax;
  /*
   * Initialize coefficients */
  ax = 1.0/(dx*dx);                     /* X-direction coef */
  ay = 1.0/(dy*dy);                     /* Y-direction coef */
  b  = -2.0/(dx*dx)-2.0/(dy*dy) - alpha; /* Central coeff */

  error = 10.0 * tol;
  k = 1;

  while ((k<=mits)&&(error>tol))
  {
    error = 0.0;

    /* Copy new solution into old */
#pragma omp parallel
    {
#pragma omp for private(j,i)
      for(i=0;i<n;i++)
        for(j=0;j<m;j++)
          uold[i][j] = u[i][j];

      /* Jacobi sweep over interior points; boundary rows/columns keep
       * their initial value 0 (Dirichlet condition).  'nowait' is safe:
       * the implicit barrier at the end of the parallel region still
       * orders this sweep before the next iteration's copy loop. */
#pragma omp for private(resid,j,i) reduction(+:error) nowait
      for (i=1;i<(n-1);i++)
        for (j=1;j<(m-1);j++)
        {
          resid = (ax*(uold[i-1][j] + uold[i+1][j])\
                + ay*(uold[i][j-1] + uold[i][j+1])+ b * uold[i][j] - f[i][j])/b;
          u[i][j] = uold[i][j] - omega * resid;
          error = error + resid*resid ;
        }
    }    /*  omp end parallel */

    /* Error check */
    k = k + 1;
    if (k%500==0)
      printf("Finished %d iteration.\n",k);
    error = sqrt(error)/(n*m);
  }          /*  End iteration loop */

  printf("Total Number of Iterations:%d\n",k);
  printf("Residual:%E\n", error);
}

/*      subroutine error_check (n,m,alpha,dx,dy,u,f)
        implicit none
************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
void error_check ( )
{
  int i,j;
  double xx,yy,temp,error;

  dx = 2.0 / (n-1);
  dy = 2.0 / (m-1);
  error = 0.0 ;

  /* RMS difference against the assumed exact solution (1-x^2)(1-y^2).
   * NOTE(review): uses the same (i-1)/(j-1) coordinate offset as
   * initialize() — see the note there. */
#pragma omp parallel for private(xx,yy,temp,j,i) reduction(+:error)
  for (i=0;i<n;i++)
    for (j=0;j<m;j++)
    {
      xx = -1.0 + dx * (i-1);
      yy = -1.0 + dy * (j-1);
      temp  = u[i][j] - (1.0-xx*xx)*(1.0-yy*yy);
      error = error + temp*temp;
    }
  error = sqrt(error)/(n*m);
  printf("Solution Error :%E \n",error);
}
kernel_exp.c
/*! @copyright (c) 2017 King Abdullah University of Science and
 * Technology (KAUST). All rights reserved.
 *
 * STARS-H is a software package, provided by King Abdullah
 *             University of Science and Technology (KAUST)
 *
 * @generate NDIM -> n 1 2 3 4
 * Generate different functions for different dimensions. This hack improves
 * performance in certain cases. Value 'n' stands for general case, whereas all
 * other values correspond to static values of dimensionality.
 * During code generation step, each appearance of @NDIM (including this one)
 * will be replace by proposed values. If you want to use this file outside
 * STARS-H, simply do substitutions yourself.
 *
 * @file src/applications/spatial/kernel_exp.c
 * @version 0.1.1
 * @author Aleksandr Mikhalev
 * @date 2018-11-06
 */

#include "common.h"
#include "starsh.h"
#include "starsh-spatial.h"

// If dimensionality is static
#if (@NDIM != n)
//! Replace variable ndim with static integer value
#define ndim @NDIM
#endif

void starsh_ssdata_block_exp_kernel_@NDIMd(int nrows, int ncols,
        STARSH_int *irow, STARSH_int *icol, void *row_data, void *col_data,
        void *result, int ld)
//! Exponential kernel for @NDIM-dimensional spatial statistics problem
/*! Fills matrix \f$ A \f$ with values
 * \f[
 *      A_{ij} = \sigma^2 e^{-\frac{r_{ij}}{\beta}} + \mu \delta(r_{ij}),
 * \f]
 * where \f$ \delta \f$ is the delta function
 * \f[
 *      \delta(x) = \left\{ \begin{array}{ll} 0, & x \ne 0\\ 1, & x = 0
 *      \end{array} \right.,
 * \f]
 * \f$ r_{ij} \f$ is a distance between \f$i\f$-th and \f$j\f$-th spatial
 * points and variance \f$ \sigma \f$, correlation length \f$ \beta \f$ and
 * noise \f$ \mu \f$ come from \p row_data (\ref STARSH_ssdata object). No
 * memory is allocated in this function!
 *
 * @param[in] nrows: Number of rows of \f$ A \f$.
 * @param[in] ncols: Number of columns of \f$ A \f$.
 * @param[in] irow: Array of row indexes.
 * @param[in] icol: Array of column indexes.
 * @param[in] row_data: Pointer to physical data (\ref STARSH_ssdata object).
 * @param[in] col_data: Pointer to physical data (\ref STARSH_ssdata object).
 * @param[out] result: Pointer to memory of \f$ A \f$.
 * @param[in] ld: Leading dimension of `result`.
 * @sa starsh_ssdata_block_exp_kernel_1d(),
 *      starsh_ssdata_block_exp_kernel_2d(),
 *      starsh_ssdata_block_exp_kernel_3d(),
 *      starsh_ssdata_block_exp_kernel_4d(),
 *      starsh_ssdata_block_exp_kernel_nd().
 * @ingroup app-spatial-kernels
 * */
{
    int i, j, k;
    STARSH_ssdata *data1 = row_data;
    STARSH_ssdata *data2 = col_data;
    double tmp, dist;
    // Read parameters
    // If dimensionality is not static
#if (@NDIM == n)
    int ndim = data1->particles.ndim;
#endif
    // beta is stored negated so that exp(dist) below evaluates exp(-r/beta).
    double beta = -data1->beta;
    double noise = data1->noise;
    // NOTE(review): the formula above says sigma^2, but 'sigma' is used
    // directly below — presumably it already stores the variance; confirm.
    double sigma = data1->sigma;
    // Get coordinates: point data is laid out as ndim contiguous coordinate
    // arrays of length count, so x1[i]/x2[i] points at the i-th coordinate.
    STARSH_int count1 = data1->particles.count;
    STARSH_int count2 = data2->particles.count;
    double *x1[ndim], *x2[ndim];
    x1[0] = data1->particles.point;
    x2[0] = data2->particles.point;
    //#pragma omp simd
    for(i = 1; i < ndim; i++)
    {
        x1[i] = x1[0]+i*count1;
        x2[i] = x2[0]+i*count2;
    }
    double *buffer = result;
    // Fill column-major matrix
    //#pragma omp simd
    for(j = 0; j < ncols; j++)
    {
        for(i = 0; i < nrows; i++)
        {
            dist = 0.0;
            for(k = 0; k < ndim; k++)
            {
                tmp = x1[k][irow[i]]-x2[k][icol[j]];
                dist += tmp*tmp;
            }
            dist = sqrt(dist)/beta;
            if(dist == 0)
                buffer[j*(size_t)ld+i] = sigma+noise;
            else
                buffer[j*(size_t)ld+i] = sigma*exp(dist);
        }
    }
}

void starsh_ssdata_block_exp_kernel_@NDIMd_simd(int nrows, int ncols,
        STARSH_int *irow, STARSH_int *icol, void *row_data, void *col_data,
        void *result, int ld)
//! Exponential kernel for @NDIM-dimensional spatial statistics problem
/*! Fills matrix \f$ A \f$ with values
 * \f[
 *      A_{ij} = \sigma^2 e^{-\frac{r_{ij}}{\beta}} + \mu \delta(r_{ij}),
 * \f]
 * where \f$ \delta \f$ is the delta function
 * \f[
 *      \delta(x) = \left\{ \begin{array}{ll} 0, & x \ne 0\\ 1, & x = 0
 *      \end{array} \right.,
 * \f]
 * \f$ r_{ij} \f$ is a distance between \f$i\f$-th and \f$j\f$-th spatial
 * points and variance \f$ \sigma \f$, correlation length \f$ \beta \f$ and
 * noise \f$ \mu \f$ come from \p row_data (\ref STARSH_ssdata object). No
 * memory is allocated in this function!
 *
 * Uses SIMD instructions.
 *
 * @param[in] nrows: Number of rows of \f$ A \f$.
 * @param[in] ncols: Number of columns of \f$ A \f$.
 * @param[in] irow: Array of row indexes.
 * @param[in] icol: Array of column indexes.
 * @param[in] row_data: Pointer to physical data (\ref STARSH_ssdata object).
 * @param[in] col_data: Pointer to physical data (\ref STARSH_ssdata object).
 * @param[out] result: Pointer to memory of \f$ A \f$.
 * @param[in] ld: Leading dimension of `result`.
 * @sa starsh_ssdata_block_exp_kernel_1d_simd(),
 *      starsh_ssdata_block_exp_kernel_2d_simd(),
 *      starsh_ssdata_block_exp_kernel_3d_simd(),
 *      starsh_ssdata_block_exp_kernel_4d_simd(),
 *      starsh_ssdata_block_exp_kernel_nd_simd().
 * @ingroup app-spatial-kernels
 * */
{
    int i, j, k;
    STARSH_ssdata *data1 = row_data;
    STARSH_ssdata *data2 = col_data;
    double tmp, dist;
    // Read parameters
    // If dimensionality is not static
#if (@NDIM == n)
    int ndim = data1->particles.ndim;
#endif
    // beta is stored negated so that exp(dist) below evaluates exp(-r/beta).
    double beta = -data1->beta;
    double noise = data1->noise;
    // NOTE(review): see the non-SIMD variant — sigma is used directly, not
    // squared; presumably it already stores the variance.
    double sigma = data1->sigma;
    // Get coordinates (same ndim-contiguous-arrays layout as above).
    size_t count1 = data1->particles.count;
    size_t count2 = data2->particles.count;
    double *x1[ndim], *x2[ndim];
    x1[0] = data1->particles.point;
    x2[0] = data2->particles.point;
    #pragma omp simd
    for(i = 1; i < ndim; i++)
    {
        x1[i] = x1[0]+i*count1;
        x2[i] = x2[0]+i*count2;
    }
    double *buffer = result;
    // Fill column-major matrix
    // NOTE(review): 'omp simd' is applied to the outer (column) loop;
    // confirm whether the inner row loop was the intended vectorization
    // target.
    #pragma omp simd
    for(j = 0; j < ncols; j++)
    {
        for(i = 0; i < nrows; i++)
        {
            dist = 0.0;
            for(k = 0; k < ndim; k++)
            {
                tmp = x1[k][irow[i]]-x2[k][icol[j]];
                dist += tmp*tmp;
            }
            dist = sqrt(dist)/beta;
            if(dist == 0)
                buffer[j*(size_t)ld+i] = sigma+noise;
            else
                buffer[j*(size_t)ld+i] = sigma*exp(dist);
        }
    }
}
fourier.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF OOO U U RRRR IIIII EEEEE RRRR % % F O O U U R R I E R R % % FFF O O U U RRRR I EEE RRRR % % F O O U U R R I E R R % % F OOO UUU R R IIIII EEEEE R R % % % % % % MagickCore Discrete Fourier Transform Methods % % % % Software Design % % Sean Burke % % Fred Weinhaus % % Cristy % % July 2009 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/fourier.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #if defined(MAGICKCORE_FFTW_DELEGATE) #if defined(MAGICKCORE_HAVE_COMPLEX_H) #include <complex.h> #endif #include <fftw3.h> #if !defined(MAGICKCORE_HAVE_CABS) #define cabs(z) (sqrt(z[0]*z[0]+z[1]*z[1])) #endif #if !defined(MAGICKCORE_HAVE_CARG) #define carg(z) (atan2(cimag(z),creal(z))) #endif #if !defined(MAGICKCORE_HAVE_CIMAG) #define cimag(z) (z[1]) #endif #if !defined(MAGICKCORE_HAVE_CREAL) #define creal(z) (z[0]) #endif #endif /* Typedef declarations. */ typedef struct _FourierInfo { PixelChannel channel; MagickBooleanType modulus; size_t width, height; ssize_t center; } FourierInfo; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p l e x I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ComplexImages() performs complex mathematics on an image sequence. % % The format of the ComplexImages method is: % % MagickBooleanType ComplexImages(Image *images,const ComplexOperator op, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o op: A complex operator. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *ComplexImages(const Image *images,const ComplexOperator op, ExceptionInfo *exception) { #define ComplexImageTag "Complex/Image" CacheView *Ai_view, *Ar_view, *Bi_view, *Br_view, *Ci_view, *Cr_view; const char *artifact; const Image *Ai_image, *Ar_image, *Bi_image, *Br_image; double snr; Image *Ci_image, *complex_images, *Cr_image, *image; MagickBooleanType status; MagickOffsetType progress; size_t number_channels; ssize_t y; assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (images->next == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageSequenceRequired","`%s'",images->filename); return((Image *) NULL); } image=CloneImage(images,0,0,MagickTrue,exception); if (image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) { image=DestroyImageList(image); return(image); } image->depth=32UL; complex_images=NewImageList(); AppendImageToList(&complex_images,image); image=CloneImage(images,0,0,MagickTrue,exception); if (image == (Image *) NULL) { complex_images=DestroyImageList(complex_images); return(complex_images); } AppendImageToList(&complex_images,image); /* Apply complex mathematics to image pixels. 
*/ artifact=GetImageArtifact(image,"complex:snr"); snr=0.0; if (artifact != (const char *) NULL) snr=StringToDouble(artifact,(char **) NULL); Ar_image=images; Ai_image=images->next; Br_image=images; Bi_image=images->next; if ((images->next->next != (Image *) NULL) && (images->next->next->next != (Image *) NULL)) { Br_image=images->next->next; Bi_image=images->next->next->next; } Cr_image=complex_images; Ci_image=complex_images->next; number_channels=MagickMin(MagickMin(MagickMin( Ar_image->number_channels,Ai_image->number_channels),MagickMin( Br_image->number_channels,Bi_image->number_channels)),MagickMin( Cr_image->number_channels,Ci_image->number_channels)); Ar_view=AcquireVirtualCacheView(Ar_image,exception); Ai_view=AcquireVirtualCacheView(Ai_image,exception); Br_view=AcquireVirtualCacheView(Br_image,exception); Bi_view=AcquireVirtualCacheView(Bi_image,exception); Cr_view=AcquireAuthenticCacheView(Cr_image,exception); Ci_view=AcquireAuthenticCacheView(Ci_image,exception); status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(Cr_image,complex_images,Cr_image->rows,1L) #endif for (y=0; y < (ssize_t) Cr_image->rows; y++) { register const Quantum *magick_restrict Ai, *magick_restrict Ar, *magick_restrict Bi, *magick_restrict Br; register Quantum *magick_restrict Ci, *magick_restrict Cr; register ssize_t x; if (status == MagickFalse) continue; Ar=GetCacheViewVirtualPixels(Ar_view,0,y,Cr_image->columns,1,exception); Ai=GetCacheViewVirtualPixels(Ai_view,0,y,Cr_image->columns,1,exception); Br=GetCacheViewVirtualPixels(Br_view,0,y,Cr_image->columns,1,exception); Bi=GetCacheViewVirtualPixels(Bi_view,0,y,Cr_image->columns,1,exception); Cr=QueueCacheViewAuthenticPixels(Cr_view,0,y,Cr_image->columns,1,exception); Ci=QueueCacheViewAuthenticPixels(Ci_view,0,y,Ci_image->columns,1,exception); if ((Ar == (const Quantum *) NULL) || (Ai == (const Quantum *) NULL) || (Br == 
(const Quantum *) NULL) || (Bi == (const Quantum *) NULL) || (Cr == (Quantum *) NULL) || (Ci == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) Cr_image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) number_channels; i++) { switch (op) { case AddComplexOperator: { Cr[i]=Ar[i]+Br[i]; Ci[i]=Ai[i]+Bi[i]; break; } case ConjugateComplexOperator: default: { Cr[i]=Ar[i]; Ci[i]=(-Bi[i]); break; } case DivideComplexOperator: { double gamma; gamma=PerceptibleReciprocal((double) Br[i]*Br[i]+Bi[i]*Bi[i]+snr); Cr[i]=gamma*((double) Ar[i]*Br[i]+(double) Ai[i]*Bi[i]); Ci[i]=gamma*((double) Ai[i]*Br[i]-(double) Ar[i]*Bi[i]); break; } case MagnitudePhaseComplexOperator: { Cr[i]=sqrt((double) Ar[i]*Ar[i]+(double) Ai[i]*Ai[i]); Ci[i]=atan2((double) Ai[i],(double) Ar[i])/(2.0*MagickPI)+0.5; break; } case MultiplyComplexOperator: { Cr[i]=QuantumScale*((double) Ar[i]*Br[i]-(double) Ai[i]*Bi[i]); Ci[i]=QuantumScale*((double) Ai[i]*Br[i]+(double) Ar[i]*Bi[i]); break; } case RealImaginaryComplexOperator: { Cr[i]=Ar[i]*cos(2.0*MagickPI*(Ai[i]-0.5)); Ci[i]=Ar[i]*sin(2.0*MagickPI*(Ai[i]-0.5)); break; } case SubtractComplexOperator: { Cr[i]=Ar[i]-Br[i]; Ci[i]=Ai[i]-Bi[i]; break; } } } Ar+=GetPixelChannels(Ar_image); Ai+=GetPixelChannels(Ai_image); Br+=GetPixelChannels(Br_image); Bi+=GetPixelChannels(Bi_image); Cr+=GetPixelChannels(Cr_image); Ci+=GetPixelChannels(Ci_image); } if (SyncCacheViewAuthenticPixels(Ci_view,exception) == MagickFalse) status=MagickFalse; if (SyncCacheViewAuthenticPixels(Cr_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(images,ComplexImageTag,progress,images->rows); if (proceed == MagickFalse) status=MagickFalse; } } Cr_view=DestroyCacheView(Cr_view); Ci_view=DestroyCacheView(Ci_view); Br_view=DestroyCacheView(Br_view); 
Bi_view=DestroyCacheView(Bi_view); Ar_view=DestroyCacheView(Ar_view); Ai_view=DestroyCacheView(Ai_view); if (status == MagickFalse) complex_images=DestroyImageList(complex_images); return(complex_images); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F o r w a r d F o u r i e r T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ForwardFourierTransformImage() implements the discrete Fourier transform % (DFT) of the image either as a magnitude / phase or real / imaginary image % pair. % % The format of the ForwadFourierTransformImage method is: % % Image *ForwardFourierTransformImage(const Image *image, % const MagickBooleanType modulus,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o modulus: if true, return as transform as a magnitude / phase pair % otherwise a real / imaginary image pair. % % o exception: return any errors or warnings in this structure. % */ #if defined(MAGICKCORE_FFTW_DELEGATE) static MagickBooleanType RollFourier(const size_t width,const size_t height, const ssize_t x_offset,const ssize_t y_offset,double *roll_pixels) { double *source_pixels; MemoryInfo *source_info; register ssize_t i, x; ssize_t u, v, y; /* Move zero frequency (DC, average color) from (0,0) to (width/2,height/2). */ source_info=AcquireVirtualMemory(width,height*sizeof(*source_pixels)); if (source_info == (MemoryInfo *) NULL) return(MagickFalse); source_pixels=(double *) GetVirtualMemoryBlob(source_info); i=0L; for (y=0L; y < (ssize_t) height; y++) { if (y_offset < 0L) v=((y+y_offset) < 0L) ? y+y_offset+(ssize_t) height : y+y_offset; else v=((y+y_offset) > ((ssize_t) height-1L)) ? y+y_offset-(ssize_t) height : y+y_offset; for (x=0L; x < (ssize_t) width; x++) { if (x_offset < 0L) u=((x+x_offset) < 0L) ? x+x_offset+(ssize_t) width : x+x_offset; else u=((x+x_offset) > ((ssize_t) width-1L)) ? 
x+x_offset-(ssize_t) width : x+x_offset; source_pixels[v*width+u]=roll_pixels[i++]; } } (void) memcpy(roll_pixels,source_pixels,height*width* sizeof(*source_pixels)); source_info=RelinquishVirtualMemory(source_info); return(MagickTrue); } static MagickBooleanType ForwardQuadrantSwap(const size_t width, const size_t height,double *source_pixels,double *forward_pixels) { MagickBooleanType status; register ssize_t x; ssize_t center, y; /* Swap quadrants. */ center=(ssize_t) (width/2L)+1L; status=RollFourier((size_t) center,height,0L,(ssize_t) height/2L, source_pixels); if (status == MagickFalse) return(MagickFalse); for (y=0L; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L); x++) forward_pixels[y*width+x+width/2L]=source_pixels[y*center+x]; for (y=1; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L); x++) forward_pixels[(height-y)*width+width/2L-x-1L]= source_pixels[y*center+x+1L]; for (x=0L; x < (ssize_t) (width/2L); x++) forward_pixels[width/2L-x-1L]=source_pixels[x+1L]; return(MagickTrue); } static void CorrectPhaseLHS(const size_t width,const size_t height, double *fourier_pixels) { register ssize_t x; ssize_t y; for (y=0L; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L); x++) fourier_pixels[y*width+x]*=(-1.0); } static MagickBooleanType ForwardFourier(const FourierInfo *fourier_info, Image *image,double *magnitude,double *phase,ExceptionInfo *exception) { CacheView *magnitude_view, *phase_view; double *magnitude_pixels, *phase_pixels; Image *magnitude_image, *phase_image; MagickBooleanType status; MemoryInfo *magnitude_info, *phase_info; register Quantum *q; register ssize_t x; ssize_t i, y; magnitude_image=GetFirstImageInList(image); phase_image=GetNextImageInList(image); if (phase_image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageSequenceRequired","`%s'",image->filename); return(MagickFalse); } /* Create "Fourier Transform" image from constituent arrays. 
*/ magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*magnitude_pixels)); phase_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*phase_pixels)); if ((magnitude_info == (MemoryInfo *) NULL) || (phase_info == (MemoryInfo *) NULL)) { if (phase_info != (MemoryInfo *) NULL) phase_info=RelinquishVirtualMemory(phase_info); if (magnitude_info != (MemoryInfo *) NULL) magnitude_info=RelinquishVirtualMemory(magnitude_info); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info); (void) memset(magnitude_pixels,0,fourier_info->width* fourier_info->height*sizeof(*magnitude_pixels)); phase_pixels=(double *) GetVirtualMemoryBlob(phase_info); (void) memset(phase_pixels,0,fourier_info->width* fourier_info->height*sizeof(*phase_pixels)); status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height, magnitude,magnitude_pixels); if (status != MagickFalse) status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,phase, phase_pixels); CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels); if (fourier_info->modulus != MagickFalse) { i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->width; x++) { phase_pixels[i]/=(2.0*MagickPI); phase_pixels[i]+=0.5; i++; } } magnitude_view=AcquireAuthenticCacheView(magnitude_image,exception); i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) { q=GetCacheViewAuthenticPixels(magnitude_view,0L,y,fourier_info->width,1UL, exception); if (q == (Quantum *) NULL) break; for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedPixelChannel: default: { SetPixelRed(magnitude_image,ClampToQuantum(QuantumRange* magnitude_pixels[i]),q); break; } case GreenPixelChannel: { 
SetPixelGreen(magnitude_image,ClampToQuantum(QuantumRange* magnitude_pixels[i]),q); break; } case BluePixelChannel: { SetPixelBlue(magnitude_image,ClampToQuantum(QuantumRange* magnitude_pixels[i]),q); break; } case BlackPixelChannel: { SetPixelBlack(magnitude_image,ClampToQuantum(QuantumRange* magnitude_pixels[i]),q); break; } case AlphaPixelChannel: { SetPixelAlpha(magnitude_image,ClampToQuantum(QuantumRange* magnitude_pixels[i]),q); break; } } i++; q+=GetPixelChannels(magnitude_image); } status=SyncCacheViewAuthenticPixels(magnitude_view,exception); if (status == MagickFalse) break; } magnitude_view=DestroyCacheView(magnitude_view); i=0L; phase_view=AcquireAuthenticCacheView(phase_image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { q=GetCacheViewAuthenticPixels(phase_view,0L,y,fourier_info->width,1UL, exception); if (q == (Quantum *) NULL) break; for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedPixelChannel: default: { SetPixelRed(phase_image,ClampToQuantum(QuantumRange* phase_pixels[i]),q); break; } case GreenPixelChannel: { SetPixelGreen(phase_image,ClampToQuantum(QuantumRange* phase_pixels[i]),q); break; } case BluePixelChannel: { SetPixelBlue(phase_image,ClampToQuantum(QuantumRange* phase_pixels[i]),q); break; } case BlackPixelChannel: { SetPixelBlack(phase_image,ClampToQuantum(QuantumRange* phase_pixels[i]),q); break; } case AlphaPixelChannel: { SetPixelAlpha(phase_image,ClampToQuantum(QuantumRange* phase_pixels[i]),q); break; } } i++; q+=GetPixelChannels(phase_image); } status=SyncCacheViewAuthenticPixels(phase_view,exception); if (status == MagickFalse) break; } phase_view=DestroyCacheView(phase_view); phase_info=RelinquishVirtualMemory(phase_info); magnitude_info=RelinquishVirtualMemory(magnitude_info); return(status); } static MagickBooleanType ForwardFourierTransform(FourierInfo *fourier_info, const Image *image,double *magnitude_pixels,double *phase_pixels, ExceptionInfo *exception) { 
CacheView *image_view; const char *value; double *source_pixels; fftw_complex *forward_pixels; fftw_plan fftw_r2c_plan; MemoryInfo *forward_info, *source_info; register const Quantum *p; register ssize_t i, x; ssize_t y; /* Generate the forward Fourier transform. */ source_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*source_pixels)); if (source_info == (MemoryInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } source_pixels=(double *) GetVirtualMemoryBlob(source_info); memset(source_pixels,0,fourier_info->width*fourier_info->height* sizeof(*source_pixels)); i=0L; image_view=AcquireVirtualCacheView(image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { p=GetCacheViewVirtualPixels(image_view,0L,y,fourier_info->width,1UL, exception); if (p == (const Quantum *) NULL) break; for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedPixelChannel: default: { source_pixels[i]=QuantumScale*GetPixelRed(image,p); break; } case GreenPixelChannel: { source_pixels[i]=QuantumScale*GetPixelGreen(image,p); break; } case BluePixelChannel: { source_pixels[i]=QuantumScale*GetPixelBlue(image,p); break; } case BlackPixelChannel: { source_pixels[i]=QuantumScale*GetPixelBlack(image,p); break; } case AlphaPixelChannel: { source_pixels[i]=QuantumScale*GetPixelAlpha(image,p); break; } } i++; p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); forward_info=AcquireVirtualMemory((size_t) fourier_info->width, (fourier_info->height/2+1)*sizeof(*forward_pixels)); if (forward_info == (MemoryInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info); return(MagickFalse); } forward_pixels=(fftw_complex *) 
GetVirtualMemoryBlob(forward_info); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ForwardFourierTransform) #endif fftw_r2c_plan=fftw_plan_dft_r2c_2d(fourier_info->width,fourier_info->height, source_pixels,forward_pixels,FFTW_ESTIMATE); fftw_execute_dft_r2c(fftw_r2c_plan,source_pixels,forward_pixels); fftw_destroy_plan(fftw_r2c_plan); source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info); value=GetImageArtifact(image,"fourier:normalize"); if ((value == (const char *) NULL) || (LocaleCompare(value,"forward") == 0)) { double gamma; /* Normalize fourier transform. */ i=0L; gamma=PerceptibleReciprocal((double) fourier_info->width* fourier_info->height); for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) forward_pixels[i]*=gamma; #else forward_pixels[i][0]*=gamma; forward_pixels[i][1]*=gamma; #endif i++; } } /* Generate magnitude and phase (or real and imaginary). */ i=0L; if (fourier_info->modulus != MagickFalse) for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { magnitude_pixels[i]=cabs(forward_pixels[i]); phase_pixels[i]=carg(forward_pixels[i]); i++; } else for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { magnitude_pixels[i]=creal(forward_pixels[i]); phase_pixels[i]=cimag(forward_pixels[i]); i++; } forward_info=(MemoryInfo *) RelinquishVirtualMemory(forward_info); return(MagickTrue); } static MagickBooleanType ForwardFourierTransformChannel(const Image *image, const PixelChannel channel,const MagickBooleanType modulus, Image *fourier_image,ExceptionInfo *exception) { double *magnitude_pixels, *phase_pixels; FourierInfo fourier_info; MagickBooleanType status; MemoryInfo *magnitude_info, *phase_info; fourier_info.width=image->columns; fourier_info.height=image->rows; if ((image->columns != image->rows) || ((image->columns % 2) != 0) 
|| ((image->rows % 2) != 0)) { size_t extent=image->columns < image->rows ? image->rows : image->columns; fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent; } fourier_info.height=fourier_info.width; fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L; fourier_info.channel=channel; fourier_info.modulus=modulus; magnitude_info=AcquireVirtualMemory((size_t) fourier_info.width, (fourier_info.height/2+1)*sizeof(*magnitude_pixels)); phase_info=AcquireVirtualMemory((size_t) fourier_info.width, (fourier_info.height/2+1)*sizeof(*phase_pixels)); if ((magnitude_info == (MemoryInfo *) NULL) || (phase_info == (MemoryInfo *) NULL)) { if (phase_info != (MemoryInfo *) NULL) phase_info=RelinquishVirtualMemory(phase_info); if (magnitude_info == (MemoryInfo *) NULL) magnitude_info=RelinquishVirtualMemory(magnitude_info); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info); phase_pixels=(double *) GetVirtualMemoryBlob(phase_info); status=ForwardFourierTransform(&fourier_info,image,magnitude_pixels, phase_pixels,exception); if (status != MagickFalse) status=ForwardFourier(&fourier_info,fourier_image,magnitude_pixels, phase_pixels,exception); phase_info=RelinquishVirtualMemory(phase_info); magnitude_info=RelinquishVirtualMemory(magnitude_info); return(status); } #endif MagickExport Image *ForwardFourierTransformImage(const Image *image, const MagickBooleanType modulus,ExceptionInfo *exception) { Image *fourier_image; fourier_image=NewImageList(); #if !defined(MAGICKCORE_FFTW_DELEGATE) (void) modulus; (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)", image->filename); #else { Image *magnitude_image; size_t height, width; width=image->columns; height=image->rows; if ((image->columns != image->rows) || ((image->columns % 2) != 0) || 
((image->rows % 2) != 0)) { size_t extent=image->columns < image->rows ? image->rows : image->columns; width=(extent & 0x01) == 1 ? extent+1UL : extent; } height=width; magnitude_image=CloneImage(image,width,height,MagickTrue,exception); if (magnitude_image != (Image *) NULL) { Image *phase_image; magnitude_image->storage_class=DirectClass; magnitude_image->depth=32UL; phase_image=CloneImage(image,width,height,MagickTrue,exception); if (phase_image == (Image *) NULL) magnitude_image=DestroyImage(magnitude_image); else { MagickBooleanType is_gray, status; phase_image->storage_class=DirectClass; phase_image->depth=32UL; AppendImageToList(&fourier_image,magnitude_image); AppendImageToList(&fourier_image,phase_image); status=MagickTrue; is_gray=IsImageGray(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel sections #endif { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; if (is_gray != MagickFalse) thread_status=ForwardFourierTransformChannel(image, GrayPixelChannel,modulus,fourier_image,exception); else thread_status=ForwardFourierTransformChannel(image, RedPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=ForwardFourierTransformChannel(image, GreenPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=ForwardFourierTransformChannel(image, BluePixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; 
thread_status=MagickTrue; if (image->colorspace == CMYKColorspace) thread_status=ForwardFourierTransformChannel(image, BlackPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (image->alpha_trait != UndefinedPixelTrait) thread_status=ForwardFourierTransformChannel(image, AlphaPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } } if (status == MagickFalse) fourier_image=DestroyImageList(fourier_image); fftw_cleanup(); } } } #endif return(fourier_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n v e r s e F o u r i e r T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InverseFourierTransformImage() implements the inverse discrete Fourier % transform (DFT) of the image either as a magnitude / phase or real / % imaginary image pair. % % The format of the InverseFourierTransformImage method is: % % Image *InverseFourierTransformImage(const Image *magnitude_image, % const Image *phase_image,const MagickBooleanType modulus, % ExceptionInfo *exception) % % A description of each parameter follows: % % o magnitude_image: the magnitude or real image. % % o phase_image: the phase or imaginary image. % % o modulus: if true, return transform as a magnitude / phase pair % otherwise a real / imaginary image pair. % % o exception: return any errors or warnings in this structure. % */ #if defined(MAGICKCORE_FFTW_DELEGATE) static MagickBooleanType InverseQuadrantSwap(const size_t width, const size_t height,const double *source,double *destination) { register ssize_t x; ssize_t center, y; /* Swap quadrants. 
*/ center=(ssize_t) (width/2L)+1L; for (y=1L; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L+1L); x++) destination[(height-y)*center-x+width/2L]=source[y*width+x]; for (y=0L; y < (ssize_t) height; y++) destination[y*center]=source[y*width+width/2L]; for (x=0L; x < center; x++) destination[x]=source[center-x-1L]; return(RollFourier(center,height,0L,(ssize_t) height/-2L,destination)); } static MagickBooleanType InverseFourier(FourierInfo *fourier_info, const Image *magnitude_image,const Image *phase_image, fftw_complex *fourier_pixels,ExceptionInfo *exception) { CacheView *magnitude_view, *phase_view; double *inverse_pixels, *magnitude_pixels, *phase_pixels; MagickBooleanType status; MemoryInfo *inverse_info, *magnitude_info, *phase_info; register const Quantum *p; register ssize_t i, x; ssize_t y; /* Inverse fourier - read image and break down into a double array. */ magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*magnitude_pixels)); phase_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*phase_pixels)); inverse_info=AcquireVirtualMemory((size_t) fourier_info->width, (fourier_info->height/2+1)*sizeof(*inverse_pixels)); if ((magnitude_info == (MemoryInfo *) NULL) || (phase_info == (MemoryInfo *) NULL) || (inverse_info == (MemoryInfo *) NULL)) { if (magnitude_info != (MemoryInfo *) NULL) magnitude_info=RelinquishVirtualMemory(magnitude_info); if (phase_info != (MemoryInfo *) NULL) phase_info=RelinquishVirtualMemory(phase_info); if (inverse_info != (MemoryInfo *) NULL) inverse_info=RelinquishVirtualMemory(inverse_info); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); return(MagickFalse); } magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info); phase_pixels=(double *) GetVirtualMemoryBlob(phase_info); inverse_pixels=(double *) GetVirtualMemoryBlob(inverse_info); i=0L; 
magnitude_view=AcquireVirtualCacheView(magnitude_image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { p=GetCacheViewVirtualPixels(magnitude_view,0L,y,fourier_info->width,1UL, exception); if (p == (const Quantum *) NULL) break; for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedPixelChannel: default: { magnitude_pixels[i]=QuantumScale*GetPixelRed(magnitude_image,p); break; } case GreenPixelChannel: { magnitude_pixels[i]=QuantumScale*GetPixelGreen(magnitude_image,p); break; } case BluePixelChannel: { magnitude_pixels[i]=QuantumScale*GetPixelBlue(magnitude_image,p); break; } case BlackPixelChannel: { magnitude_pixels[i]=QuantumScale*GetPixelBlack(magnitude_image,p); break; } case AlphaPixelChannel: { magnitude_pixels[i]=QuantumScale*GetPixelAlpha(magnitude_image,p); break; } } i++; p+=GetPixelChannels(magnitude_image); } } magnitude_view=DestroyCacheView(magnitude_view); status=InverseQuadrantSwap(fourier_info->width,fourier_info->height, magnitude_pixels,inverse_pixels); (void) memcpy(magnitude_pixels,inverse_pixels,fourier_info->height* fourier_info->center*sizeof(*magnitude_pixels)); i=0L; phase_view=AcquireVirtualCacheView(phase_image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { p=GetCacheViewVirtualPixels(phase_view,0,y,fourier_info->width,1, exception); if (p == (const Quantum *) NULL) break; for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedPixelChannel: default: { phase_pixels[i]=QuantumScale*GetPixelRed(phase_image,p); break; } case GreenPixelChannel: { phase_pixels[i]=QuantumScale*GetPixelGreen(phase_image,p); break; } case BluePixelChannel: { phase_pixels[i]=QuantumScale*GetPixelBlue(phase_image,p); break; } case BlackPixelChannel: { phase_pixels[i]=QuantumScale*GetPixelBlack(phase_image,p); break; } case AlphaPixelChannel: { phase_pixels[i]=QuantumScale*GetPixelAlpha(phase_image,p); break; } } i++; p+=GetPixelChannels(phase_image); 
} } if (fourier_info->modulus != MagickFalse) { i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->width; x++) { phase_pixels[i]-=0.5; phase_pixels[i]*=(2.0*MagickPI); i++; } } phase_view=DestroyCacheView(phase_view); CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels); if (status != MagickFalse) status=InverseQuadrantSwap(fourier_info->width,fourier_info->height, phase_pixels,inverse_pixels); (void) memcpy(phase_pixels,inverse_pixels,fourier_info->height* fourier_info->center*sizeof(*phase_pixels)); inverse_info=RelinquishVirtualMemory(inverse_info); /* Merge two sets. */ i=0L; if (fourier_info->modulus != MagickFalse) for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier_pixels[i]=magnitude_pixels[i]*cos(phase_pixels[i])+I* magnitude_pixels[i]*sin(phase_pixels[i]); #else fourier_pixels[i][0]=magnitude_pixels[i]*cos(phase_pixels[i]); fourier_pixels[i][1]=magnitude_pixels[i]*sin(phase_pixels[i]); #endif i++; } else for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier_pixels[i]=magnitude_pixels[i]+I*phase_pixels[i]; #else fourier_pixels[i][0]=magnitude_pixels[i]; fourier_pixels[i][1]=phase_pixels[i]; #endif i++; } magnitude_info=RelinquishVirtualMemory(magnitude_info); phase_info=RelinquishVirtualMemory(phase_info); return(status); } static MagickBooleanType InverseFourierTransform(FourierInfo *fourier_info, fftw_complex *fourier_pixels,Image *image,ExceptionInfo *exception) { CacheView *image_view; const char *value; double *source_pixels; fftw_plan fftw_c2r_plan; MemoryInfo *source_info; register Quantum *q; register ssize_t i, x; ssize_t y; source_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*source_pixels)); if (source_info == (MemoryInfo *) NULL) { (void) 
ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } source_pixels=(double *) GetVirtualMemoryBlob(source_info); value=GetImageArtifact(image,"fourier:normalize"); if (LocaleCompare(value,"inverse") == 0) { double gamma; /* Normalize inverse transform. */ i=0L; gamma=PerceptibleReciprocal((double) fourier_info->width* fourier_info->height); for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier_pixels[i]*=gamma; #else fourier_pixels[i][0]*=gamma; fourier_pixels[i][1]*=gamma; #endif i++; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_InverseFourierTransform) #endif fftw_c2r_plan=fftw_plan_dft_c2r_2d(fourier_info->width,fourier_info->height, fourier_pixels,source_pixels,FFTW_ESTIMATE); fftw_execute_dft_c2r(fftw_c2r_plan,fourier_pixels,source_pixels); fftw_destroy_plan(fftw_c2r_plan); i=0L; image_view=AcquireAuthenticCacheView(image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { if (y >= (ssize_t) image->rows) break; q=GetCacheViewAuthenticPixels(image_view,0L,y,fourier_info->width > image->columns ? 
image->columns : fourier_info->width,1UL,exception); if (q == (Quantum *) NULL) break; for (x=0L; x < (ssize_t) fourier_info->width; x++) { if (x < (ssize_t) image->columns) switch (fourier_info->channel) { case RedPixelChannel: default: { SetPixelRed(image,ClampToQuantum(QuantumRange*source_pixels[i]),q); break; } case GreenPixelChannel: { SetPixelGreen(image,ClampToQuantum(QuantumRange*source_pixels[i]), q); break; } case BluePixelChannel: { SetPixelBlue(image,ClampToQuantum(QuantumRange*source_pixels[i]), q); break; } case BlackPixelChannel: { SetPixelBlack(image,ClampToQuantum(QuantumRange*source_pixels[i]), q); break; } case AlphaPixelChannel: { SetPixelAlpha(image,ClampToQuantum(QuantumRange*source_pixels[i]), q); break; } } i++; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) break; } image_view=DestroyCacheView(image_view); source_info=RelinquishVirtualMemory(source_info); return(MagickTrue); } static MagickBooleanType InverseFourierTransformChannel( const Image *magnitude_image,const Image *phase_image, const PixelChannel channel,const MagickBooleanType modulus, Image *fourier_image,ExceptionInfo *exception) { fftw_complex *inverse_pixels; FourierInfo fourier_info; MagickBooleanType status; MemoryInfo *inverse_info; fourier_info.width=magnitude_image->columns; fourier_info.height=magnitude_image->rows; if ((magnitude_image->columns != magnitude_image->rows) || ((magnitude_image->columns % 2) != 0) || ((magnitude_image->rows % 2) != 0)) { size_t extent=magnitude_image->columns < magnitude_image->rows ? magnitude_image->rows : magnitude_image->columns; fourier_info.width=(extent & 0x01) == 1 ? 
extent+1UL : extent; } fourier_info.height=fourier_info.width; fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L; fourier_info.channel=channel; fourier_info.modulus=modulus; inverse_info=AcquireVirtualMemory((size_t) fourier_info.width, (fourier_info.height/2+1)*sizeof(*inverse_pixels)); if (inverse_info == (MemoryInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); return(MagickFalse); } inverse_pixels=(fftw_complex *) GetVirtualMemoryBlob(inverse_info); status=InverseFourier(&fourier_info,magnitude_image,phase_image, inverse_pixels,exception); if (status != MagickFalse) status=InverseFourierTransform(&fourier_info,inverse_pixels,fourier_image, exception); inverse_info=RelinquishVirtualMemory(inverse_info); return(status); } #endif MagickExport Image *InverseFourierTransformImage(const Image *magnitude_image, const Image *phase_image,const MagickBooleanType modulus, ExceptionInfo *exception) { Image *fourier_image; assert(magnitude_image != (Image *) NULL); assert(magnitude_image->signature == MagickCoreSignature); if (magnitude_image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", magnitude_image->filename); if (phase_image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageSequenceRequired","`%s'",magnitude_image->filename); return((Image *) NULL); } #if !defined(MAGICKCORE_FFTW_DELEGATE) fourier_image=(Image *) NULL; (void) modulus; (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)", magnitude_image->filename); #else { fourier_image=CloneImage(magnitude_image,magnitude_image->columns, magnitude_image->rows,MagickTrue,exception); if (fourier_image != (Image *) NULL) { MagickBooleanType is_gray, status; status=MagickTrue; is_gray=IsImageGray(magnitude_image); if (is_gray != MagickFalse) 
is_gray=IsImageGray(phase_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel sections #endif { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; if (is_gray != MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,GrayPixelChannel,modulus,fourier_image,exception); else thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,RedPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,GreenPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,BluePixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (magnitude_image->colorspace == CMYKColorspace) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,BlackPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (magnitude_image->alpha_trait != UndefinedPixelTrait) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,AlphaPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } } if (status == MagickFalse) 
fourier_image=DestroyImage(fourier_image); } fftw_cleanup(); } #endif return(fourier_image); }
serialized.c
// RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc
// Compilation fails for icc
// XFAIL: icc

// Verifies the OMPT event sequence for a fully serialized teams construct
// (one team, one thread): initial task begin -> teams begin -> nested
// initial task begin -> runtime-forked parallel -> user parallel, with the
// matching *_end events emitted in reverse order.

#include "callback.h"

// Single-team, single-thread target teams region wrapping a serialized
// parallel region; the printf only gives the region a body.
int main() {
#pragma omp target teams num_teams(1) thread_limit(1)
#pragma omp parallel num_threads(1)
  {
    printf("In teams\n");
  }
  return 0;
}

// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK-NOT: 0: parallel_data initially not null
// CHECK-NOT: 0: task_data initially not null
// CHECK-NOT: 0: thread_data initially not null

// CHECK: {{^}}[[MASTER:[0-9]+]]: ompt_event_initial_task_begin:
// CHECK-SAME: task_id=[[INIT_TASK:[0-9]+]], {{.*}}, index=1

// CHECK: {{^}}[[MASTER]]: ompt_event_teams_begin:
// CHECK-SAME: parent_task_id=[[INIT_TASK]]
// CHECK-SAME: {{.*}} requested_num_teams=1
// CHECK-SAME: {{.*}} invoker=[[TEAMS_FLAGS:[0-9]+]]

// initial task in the teams construct starts
// CHECK: {{^}}[[MASTER]]: ompt_event_initial_task_begin:
// CHECK-SAME: task_id=[[INIT_TASK_0:[0-9]+]], actual_parallelism=1, index=0

// parallel region forked by runtime
// CHECK: {{^}}[[MASTER]]: ompt_event_parallel_begin:
// CHECK-SAME: {{.*}} parent_task_id=[[INIT_TASK_0]]
// CHECK-SAME: {{.*}} parallel_id=[[PAR_0:[0-9]+]]
// CHECK: {{^}}[[MASTER]]: ompt_event_implicit_task_begin:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_0]], task_id=[[IMPL_TASK_0:[0-9]+]]

// user parallel region
// CHECK: {{^}}[[MASTER]]: ompt_event_parallel_begin:
// CHECK-SAME: {{.*}} parent_task_id=[[IMPL_TASK_0]]
// CHECK-SAME: {{.*}} parallel_id=[[PAR_00:[0-9]+]]
// CHECK-SAME: {{.*}} requested_team_size=1
// CHECK: {{^}}[[MASTER]]: ompt_event_implicit_task_begin:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_00]], task_id=[[IMPL_TASK_00:[0-9]+]]
// CHECK-SAME: {{.*}} team_size=1, thread_num=0
// CHECK: {{^}}[[MASTER]]: ompt_event_implicit_task_end:
// CHECK-SAME: {{.*}} parallel_id={{[0-9]+}}, task_id=[[IMPL_TASK_00]]
// CHECK: {{^}}[[MASTER]]: ompt_event_parallel_end:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_00]], task_id=[[IMPL_TASK_0]]
// CHECK: {{^}}[[MASTER]]: ompt_event_parallel_end:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_0]], task_id=[[INIT_TASK_0]]

// initial task in the teams construct ends
// CHECK: {{^}}[[MASTER]]: ompt_event_initial_task_end:
// CHECK-SAME: task_id=[[INIT_TASK_0]], actual_parallelism=0, index=0
// CHECK: {{^}}[[MASTER]]: ompt_event_teams_end:
// CHECK-SAME: {{.*}} task_id=[[INIT_TASK]], invoker=[[TEAMS_FLAGS]]
// CHECK: {{^}}[[MASTER]]: ompt_event_initial_task_end:
// CHECK-SAME: task_id=[[INIT_TASK]], {{.*}}, index=1
flatsky_utils.c
#include "config.h"
#include "utils.h"
#include <fitsio.h>

/* Allocate n bytes via FFTW's allocator (single- or double-precision
 * variant selected by _SPREC) so buffers carry the alignment FFTW
 * prefers; on failure reports a fatal memory error instead of
 * returning NULL. */
void *dftw_malloc(size_t n)
{
#ifdef _SPREC
  void *p=fftwf_malloc(n);
#else //_SPREC
  void *p=fftw_malloc(n);
#endif //_SPREC
  if(p==NULL)
    report_error(NMT_ERROR_MEMORY,"Ran out of memory\n");
  return p;
}

/* Release a buffer obtained from dftw_malloc. */
void dftw_free(void *p)
{
#ifdef _SPREC
  fftwf_free(p);
#else //_SPREC
  fftw_free(p);
#endif //_SPREC
}

/* Copy srcmap into destmap (fs->npix pixels), parallelized over pixels. */
void fs_mapcpy(nmt_flatsky_info *fs,flouble *destmap,flouble *srcmap)
{
#pragma omp parallel default(none) \
  shared(fs,destmap,srcmap)
  {
    long ip;

#pragma omp for
    for(ip=0;ip<fs->npix;ip++) {
      destmap[ip]=srcmap[ip];
    } //end omp for
  } //end omp parallel
}

/* Pixel-wise product mp_out = mp1 * mp2 over fs->npix pixels. */
void fs_map_product(nmt_flatsky_info *fs,flouble *mp1,flouble *mp2,flouble *mp_out)
{
#pragma omp parallel default(none) \
  shared(fs,mp1,mp2,mp_out)
  {
    long ip;

#pragma omp for
    for(ip=0;ip<fs->npix;ip++) {
      mp_out[ip]=mp1[ip]*mp2[ip];
    } //end omp for
  } //end omp parallel
}

/* Dot product of two maps: per-thread partial sums in double precision,
 * merged in a critical section, then scaled by the pixel area
 * fs->pixsize (so the result approximates the integral of mp1*mp2). */
flouble fs_map_dot(nmt_flatsky_info *fs,flouble *mp1,flouble *mp2)
{
  double sum=0;

#pragma omp parallel default(none) \
  shared(mp1,mp2,sum,fs)
  {
    long ip;
    double sum_thr=0;

#pragma omp for
    for(ip=0;ip<fs->npix;ip++) {
      sum_thr+=mp1[ip]*mp2[ip];
    } //end omp for

#pragma omp critical
    {
      sum+=sum_thr;
    } //end omp critical
  } //end omp parallel

  return (flouble)(sum*fs->pixsize);
}

/* In-place rotation of a pair of spin-`spin` Fourier coefficients from
 * the (Q,U)-like basis to the (E,B)-like basis.  For every mode in the
 * r2c half-plane (ny rows x (nx/2+1) columns) the pair is rotated by
 * spin times the azimuth of k=(kx,ky) and multiplied by
 * sig = sig_overall*I^spin (sig_overall is +1 for spin 0, -1 otherwise). */
static void qu2eb(nmt_flatsky_info *fs,int spin,fcomplex **alm)
{
  int sig_overall=-1;
  if(spin==0)
    sig_overall=1;

#pragma omp parallel default(none) \
  shared(fs,spin,alm,sig_overall)
  {
    int iy;
    fcomplex sig=sig_overall*cpow(I,spin);
    flouble dkx=2*M_PI/fs->lx;
    flouble dky=2*M_PI/fs->ly;

#pragma omp for
    for(iy=0;iy<fs->ny;iy++) {
      int ix;
      flouble ky;
      //Rows above ny/2 hold negative ky frequencies
      if(2*iy<=fs->ny)
        ky=iy*dky;
      else
        ky=-(fs->ny-iy)*dky;
      for(ix=0;ix<=fs->nx/2;ix++) {
        flouble csphi,ssphi,cph,sph;
        fcomplex e,b;
        int s=0;
        flouble kx=ix*dkx;
        long index=ix+(fs->nx/2+1)*iy;
        flouble kmod2=kx*kx+ky*ky;
        if(kmod2<=0) { //k=0 mode: azimuth undefined, use phi=0
          cph=1;
          sph=0;
        }
        else {
          flouble i_kmod=1./sqrt(kmod2);
          cph=kx*i_kmod;
          sph=ky*i_kmod;
        }
        //Build cos/sin of spin*phi by repeated angle addition
        csphi=1;
        ssphi=0;
        while(s<spin) {
          flouble c2=csphi*cph-ssphi*sph;
          flouble
            s2=ssphi*cph+csphi*sph;
          csphi=c2;
          ssphi=s2;
          s++;
        }
        //Rotate (alm[0],alm[1]) by spin*phi and apply the overall phase
        e=sig*(alm[0][index]*csphi-alm[1][index]*ssphi);
        b=sig*(alm[0][index]*ssphi+alm[1][index]*csphi);
        alm[0][index]=e;
        alm[1][index]=b;
      }
    } //end omp for
  } //end omp parallel
}

/* Inverse of qu2eb: rotates (E,B)-like Fourier coefficients back to the
 * (Q,U)-like basis.  Same per-mode angle construction, but the rotation
 * sense is reversed and the overall phase uses cpow(-I,spin). */
static void eb2qu(nmt_flatsky_info *fs,int spin,fcomplex **alm)
{
  int sig_overall=-1;
  if(spin==0)
    sig_overall=1;

#pragma omp parallel default(none) \
  shared(fs,spin,alm,sig_overall)
  {
    int iy;
    fcomplex sig=sig_overall*cpow(-I,spin);
    flouble dkx=2*M_PI/fs->lx;
    flouble dky=2*M_PI/fs->ly;

#pragma omp for
    for(iy=0;iy<fs->ny;iy++) {
      int ix;
      flouble ky;
      //Rows above ny/2 hold negative ky frequencies
      if(2*iy<=fs->ny)
        ky=iy*dky;
      else
        ky=-(fs->ny-iy)*dky;
      for(ix=0;ix<=fs->nx/2;ix++) {
        flouble csphi,ssphi,cph,sph;
        fcomplex q,u;
        int s=0;
        flouble kx=ix*dkx;
        long index=ix+(fs->nx/2+1)*iy;
        flouble kmod2=kx*kx+ky*ky;
        if(kmod2<=0) { //k=0 mode: azimuth undefined, use phi=0
          cph=1;
          sph=0;
        }
        else {
          flouble i_kmod=1./sqrt(kmod2);
          cph=kx*i_kmod;
          sph=ky*i_kmod;
        }
        //Build cos/sin of spin*phi by repeated angle addition
        csphi=1;
        ssphi=0;
        while(s<spin) {
          flouble c2=csphi*cph-ssphi*sph;
          flouble s2=ssphi*cph+csphi*sph;
          csphi=c2;
          ssphi=s2;
          s++;
        }
        q=sig*( alm[0][index]*csphi+alm[1][index]*ssphi);
        u=sig*(-alm[0][index]*ssphi+alm[1][index]*csphi);
        alm[0][index]=q;
        alm[1][index]=u;
      }
    } //end omp for
  } //end omp parallel
}

/* Forward transform of ntrans sets of maps into Fourier coefficients
 * (1 map per set for spin 0, 2 for spin!=0).  Each map is FFT'd
 * (r2c), normalized, and for spin!=0 the (Q,U)-like harmonics are
 * rotated into (E,B). */
void fs_map2alm(nmt_flatsky_info *fs,int ntrans,int spin,flouble **map,fcomplex **alm)
{
  //TODO init threads??
#ifdef _SPREC
  fftwf_plan plan_ft;
#else //_SPREC
  fftw_plan plan_ft;
#endif //_SPREC
  int imap,nmaps=1;
  if(spin) nmaps=2;

  //Real-to-complex FFT of each map; a plan is created and destroyed per map
  for(imap=0;imap<nmaps*ntrans;imap++) {
#ifdef _SPREC
    plan_ft=fftwf_plan_dft_r2c_2d(fs->ny,fs->nx,map[imap],alm[imap],FFTW_ESTIMATE);
    fftwf_execute(plan_ft);
    fftwf_destroy_plan(plan_ft);
#else //_SPREC
    plan_ft=fftw_plan_dft_r2c_2d(fs->ny,fs->nx,map[imap],alm[imap],FFTW_ESTIMATE);
    fftw_execute(plan_ft);
    fftw_destroy_plan(plan_ft);
#endif //_SPREC

    //Scale the unnormalized FFTW output by lx*ly/(2*pi*nx*ny)
#pragma omp parallel default(none) \
  shared(fs,alm,imap)
    {
      long ipix;
      flouble norm=fs->lx*fs->ly/(2*M_PI*fs->nx*fs->ny);

#pragma omp for
      for(ipix=0;ipix<fs->ny*(fs->nx/2+1);ipix++) {
        alm[imap][ipix]*=norm;
      } //end omp for
    } //end omp parallel
  }

  if(nmaps>1) { //Q,U -> E,B
    for(imap=0;imap<ntrans*nmaps;imap+=nmaps)
      qu2eb(fs,spin,&(alm[imap]));
  }
}

/* Inverse transform of ntrans sets of Fourier coefficients back to maps
 * (1 map per set for spin 0, 2 for spin!=0).  For spin!=0 the (E,B)
 * harmonics are first rotated into (Q,U), then each set is FFT'd (c2r)
 * and normalized.  NOTE(review): FFTW c2r transforms may overwrite their
 * complex input, yet alm is passed through qu2eb again afterwards —
 * callers presumably should not rely on alm's contents after this call;
 * verify against the call sites. */
void fs_alm2map(nmt_flatsky_info *fs,int ntrans,int spin,flouble **map,fcomplex **alm)
{
  //TODO init threads??
#ifdef _SPREC
  fftwf_plan plan_ft;
#else //_SPREC
  fftw_plan plan_ft;
#endif //_SPREC
  int imap,nmaps=1;
  if(spin) nmaps=2;

  if(nmaps>1) { //E,B -> Q,U
    for(imap=0;imap<ntrans*nmaps;imap+=nmaps)
      eb2qu(fs,spin,&(alm[imap]));
  }

  for(imap=0;imap<nmaps*ntrans;imap++) {
#ifdef _SPREC
    plan_ft=fftwf_plan_dft_c2r_2d(fs->ny,fs->nx,alm[imap],map[imap],FFTW_ESTIMATE);
    fftwf_execute(plan_ft);
    fftwf_destroy_plan(plan_ft);
#else //_SPREC
    plan_ft=fftw_plan_dft_c2r_2d(fs->ny,fs->nx,alm[imap],map[imap],FFTW_ESTIMATE);
    fftw_execute(plan_ft);
    fftw_destroy_plan(plan_ft);
#endif //_SPREC

    //Scale the unnormalized FFTW output by 2*pi/(lx*ly)
#pragma omp parallel default(none) \
  shared(fs,map,imap)
    {
      long ipix;
      flouble norm=2*M_PI/(fs->lx*fs->ly);

#pragma omp for
      for(ipix=0;ipix<fs->npix;ipix++) {
        map[imap][ipix]*=norm;
      } //end omp for
    } //end omp parallel
  }

  if(nmaps>1) { //Q,U -> E,B
    for(imap=0;imap<ntrans*nmaps;imap+=nmaps)
      qu2eb(fs,spin,&(alm[imap]));
  }
}

//Samples per 1/sigma when tabulating the beam window
#define SAMP_RATE_SIGMA 128
//Conversion factor from FWHM in arcmin to sigma in radians
#define FWHM2SIGMA_FLAT 0.00012352884853326381

/* Tabulate a Gaussian beam window B(l)=exp(-0.5*l^2*sigma^2) for a beam
 * of the given FWHM (arcmin), sampled on 5*SAMP_RATE_SIGMA points
 * covering l in [0, 5/sigma], and wrap it in an nmt_k_function
 * interpolator.  Caller owns the returned object. */
nmt_k_function *fs_generate_beam_window(double fwhm_amin)
{
  int ii;
  nmt_k_function *beam;
  flouble
    *larr=my_malloc(5*SAMP_RATE_SIGMA*sizeof(flouble));
  flouble *farr=my_malloc(5*SAMP_RATE_SIGMA*sizeof(flouble));
  double sigma=FWHM2SIGMA_FLAT*fwhm_amin;

  //Tabulate exp(-0.5*(l*sigma)^2), SAMP_RATE_SIGMA samples per unit l*sigma
  for(ii=0;ii<5*SAMP_RATE_SIGMA;ii++) {
    flouble l=(ii+0.0)/(SAMP_RATE_SIGMA*sigma);
    larr[ii]=l;
    farr[ii]=exp(-0.5*l*l*sigma*sigma);
  }

  beam=nmt_k_function_alloc(5*SAMP_RATE_SIGMA,larr,farr,1.,0.,0);
  free(larr);
  free(farr);

  return beam;
}

/* Set all ny*(nx/2+1) Fourier coefficients of alm to zero. */
void fs_zero_alm(nmt_flatsky_info *fs,fcomplex *alm)
{
#pragma omp parallel default(none) \
  shared(fs,alm)
  {
    int ii;

#pragma omp for
    for(ii=0;ii<fs->ny*(fs->nx/2+1);ii++) {
      alm[ii]=0;
    } //end omp for
  } //end omp parallel
}

/* Multiply alm_in by an isotropic window in |k| and write (or, if
 * add_to_out, accumulate) the result into alm_out.  When window==NULL a
 * Gaussian beam of the given FWHM (arcmin) is generated internally and
 * freed before returning; otherwise the supplied window is used and left
 * untouched.  alm_in may alias alm_out (pure per-mode operation). */
void fs_alter_alm(nmt_flatsky_info *fs,double fwhm_amin,fcomplex *alm_in,fcomplex *alm_out,
		  nmt_k_function *window,int add_to_out)
{
  nmt_k_function *beam;
  if(window==NULL)
    beam=fs_generate_beam_window(fwhm_amin);
  else
    beam=window;

#pragma omp parallel default(none) \
  shared(fs,alm_in,alm_out,beam,add_to_out)
  {
    int iy;
    flouble dkx=2*M_PI/fs->lx;
    flouble dky=2*M_PI/fs->ly;
    //One interpolation accelerator per thread
    gsl_interp_accel *intacc_thr=gsl_interp_accel_alloc();

#pragma omp for
    for(iy=0;iy<fs->ny;iy++) {
      int ix;
      flouble ky;
      if(2*iy<=fs->ny)
        ky=iy*dky;
      else
        ky=-(fs->ny-iy)*dky;
      for(ix=0;ix<=fs->nx/2;ix++) {
        flouble kx=ix*dkx;
        long index=ix+(fs->nx/2+1)*iy;
        flouble kmod=sqrt(kx*kx+ky*ky);
        if(add_to_out)
          alm_out[index]+=alm_in[index]*nmt_k_function_eval(beam,kmod,intacc_thr);
        else
          alm_out[index]=alm_in[index]*nmt_k_function_eval(beam,kmod,intacc_thr);
      }
    } //end omp for
    gsl_interp_accel_free(intacc_thr);
  } //end omp parallel

  if(window==NULL)
    nmt_k_function_free(beam);
}

/* Binned cross-spectra of two sets of Fourier coefficients.  Produces
 * nmaps_1*nmaps_2 bandpower arrays in cls (index i2+nmaps_2*i1),
 * averaging Re(a1*conj(a2)) over the modes falling in each |k| band of
 * `bin`.  Modes whose kx lies in [lmn_x,lmx_x] or whose ky lies in
 * [lmn_y,lmx_y] are excluded (pass an empty range such as (1,-1) to
 * disable the exclusion). */
void fs_alm2cl(nmt_flatsky_info *fs,nmt_binning_scheme_flat *bin,
	       fcomplex **alms_1,fcomplex **alms_2,int spin_1,int spin_2,flouble **cls,
	       flouble lmn_x,flouble lmx_x,flouble lmn_y,flouble lmx_y)
{
  int i1,nmaps_1=1,nmaps_2=1;
  int *n_cells=my_malloc(bin->n_bands*sizeof(int));
  if(spin_1) nmaps_1=2;
  if(spin_2) nmaps_2=2;

  for(i1=0;i1<nmaps_1;i1++) {
    int i2;
    fcomplex *alm1=alms_1[i1];
    for(i2=0;i2<nmaps_2;i2++) {
      int il;
      fcomplex *alm2=alms_2[i2];
int index_cl=i2+nmaps_2*i1; flouble norm_factor=4*M_PI*M_PI/(fs->lx*fs->ly); for(il=0;il<bin->n_bands;il++) { cls[index_cl][il]=0; n_cells[il]=0; } #pragma omp parallel default(none) \ shared(fs,bin,alm1,alm2,index_cl,cls) \ shared(lmn_x,lmx_x,lmn_y,lmx_y,n_cells) { int iy; flouble dkx=2*M_PI/fs->lx; flouble dky=2*M_PI/fs->ly; #pragma omp for for(iy=0;iy<fs->ny;iy++) { int ix; flouble ky; int ik=0; if(2*iy<=fs->ny) ky=iy*dky; else ky=-(fs->ny-iy)*dky; if((ky>=lmn_y) && (ky<=lmx_y)) continue; for(ix=0;ix<fs->nx;ix++) { int ix_here; long index; flouble kmod,kx; if(2*ix<=fs->nx) { kx=ix*dkx; ix_here=ix; } else { kx=-(fs->nx-ix)*dkx; ix_here=fs->nx-ix; } if((kx>=lmn_x) && (kx<=lmx_x)) continue; index=ix_here+(fs->nx/2+1)*iy; kmod=sqrt(kx*kx+ky*ky); ik=nmt_bins_flat_search_fast(bin,kmod,ik); if(ik>=0) { #pragma omp atomic cls[index_cl][ik]+=(creal(alm1[index])*creal(alm2[index])+cimag(alm1[index])*cimag(alm2[index])); #pragma omp atomic n_cells[ik]++; } } } //end omp for } //end omp parallel for(il=0;il<bin->n_bands;il++) { if(n_cells[il]<=0) cls[index_cl][il]=0; else cls[index_cl][il]*=norm_factor/n_cells[il]; } } } free(n_cells); } void fs_anafast(nmt_flatsky_info *fs,nmt_binning_scheme_flat *bin, flouble **maps_1,flouble **maps_2,int spin_1,int spin_2,flouble **cls) { int i1; fcomplex **alms_1,**alms_2; int nmaps_1=1,nmaps_2=1; if(spin_1) nmaps_1=2; if(spin_2) nmaps_2=2; alms_1=my_malloc(nmaps_1*sizeof(fcomplex *)); for(i1=0;i1<nmaps_1;i1++) alms_1[i1]=dftw_malloc(fs->ny*(fs->nx/2+1)*sizeof(fcomplex)); fs_map2alm(fs,1,spin_1,maps_1,alms_1); if(maps_1==maps_2) alms_2=alms_1; else { alms_2=my_malloc(nmaps_2*sizeof(fcomplex *)); for(i1=0;i1<nmaps_2;i1++) alms_2[i1]=dftw_malloc(fs->ny*(fs->nx/2+1)*sizeof(fcomplex)); fs_map2alm(fs,1,spin_2,maps_2,alms_2); } fs_alm2cl(fs,bin,alms_1,alms_2,spin_1,spin_2,cls,1.,-1.,1.,-1.); for(i1=0;i1<nmaps_1;i1++) dftw_free(alms_1[i1]); free(alms_1); if(maps_1!=maps_2) { for(i1=0;i1<nmaps_2;i1++) dftw_free(alms_2[i1]); free(alms_2); } } 
fcomplex **fs_synalm(int nx,int ny,flouble lx,flouble ly,int nmaps, nmt_k_function **cells,nmt_k_function **beam,int seed) { int imap; fcomplex **alms; alms=my_malloc(nmaps*sizeof(fcomplex *)); for(imap=0;imap<nmaps;imap++) alms[imap]=dftw_malloc(ny*(nx/2+1)*sizeof(fcomplex)); //Switch off error handler for Cholesky decomposition gsl_error_handler_t *geh=gsl_set_error_handler_off(); int numthr=0; #pragma omp parallel default(none) \ shared(nx,ny,lx,ly,nmaps,cells,beam,seed,alms,numthr) { //This is to avoid using the omp.h library int ithr; #pragma omp critical { ithr=numthr; numthr++; } int iy; double dkx=2*M_PI/lx,dky=2*M_PI/ly; double inv_dkvol=1./(dkx*dky); gsl_vector *rv1=gsl_vector_alloc(nmaps); gsl_vector *iv1=gsl_vector_alloc(nmaps); gsl_vector *rv2=gsl_vector_alloc(nmaps); gsl_vector *iv2=gsl_vector_alloc(nmaps); gsl_matrix *clmat=gsl_matrix_calloc(nmaps,nmaps); gsl_vector *eval =gsl_vector_alloc(nmaps); gsl_matrix *evec =gsl_matrix_alloc(nmaps,nmaps); gsl_eigen_symmv_workspace *wsym=gsl_eigen_symmv_alloc(nmaps); unsigned int seed_thr=(unsigned int)(seed+ithr); gsl_rng *rng=init_rng(seed_thr); gsl_interp_accel *intacc_cells=gsl_interp_accel_alloc(); gsl_interp_accel *intacc_beam=gsl_interp_accel_alloc(); #pragma omp for for(iy=0;iy<ny;iy++) { int ix; flouble ky; if(2*iy<=ny) ky=iy*dky; else ky=-(ny-iy)*dky; for(ix=0;ix<=nx/2;ix++) { int imp1,imp2; flouble kx=ix*dkx; long index=ix+(nx/2+1)*iy; flouble kmod=sqrt(kx*kx+ky*ky); if(kmod<0) { for(imp1=0;imp1<nmaps;imp1++) alms[imp1][index]=0; } else { //Get power spectrum int icl=0; for(imp1=0;imp1<nmaps;imp1++) { for(imp2=imp1;imp2<nmaps;imp2++) {//Fill up only lower triangular part flouble cl=0.5*inv_dkvol*nmt_k_function_eval(cells[icl],kmod,intacc_cells); gsl_matrix_set(clmat,imp1,imp2,cl); if(imp2!=imp1) gsl_matrix_set(clmat,imp2,imp1,cl); icl++; } } //Take square root gsl_eigen_symmv(clmat,eval,evec,wsym); for(imp1=0;imp1<nmaps;imp1++) { double dr,di; //At the same time get white random numbers 
rng_gauss(rng,&dr,&di); gsl_vector_set(rv1,imp1,dr); gsl_vector_set(iv1,imp1,di); for(imp2=0;imp2<nmaps;imp2++) { double oij=gsl_matrix_get(evec,imp1,imp2); double lambda=gsl_vector_get(eval,imp2); if(lambda<=0) lambda=0; else lambda=sqrt(lambda); gsl_matrix_set(clmat,imp1,imp2,oij*lambda); } } //Get correlate random numbers gsl_blas_dgemv(CblasNoTrans,1.,clmat,rv1,0,rv2); gsl_blas_dgemv(CblasNoTrans,1.,clmat,iv1,0,iv2); for(imp1=0;imp1<nmaps;imp1++) { flouble bm=nmt_k_function_eval(beam[imp1],kmod,intacc_beam); flouble a_re=bm*gsl_vector_get(rv2,imp1); flouble a_im=bm*gsl_vector_get(iv2,imp1); if(ix==0) { if(iy>ny/2) continue; else { if(iy==0) alms[imp1][index]=(fcomplex)(M_SQRT2*a_re+I*0*a_im); else { int iyy=ny-iy; alms[imp1][index]=(fcomplex)(a_re+I*a_im); alms[imp1][ix+(nx/2+1)*iyy]=(fcomplex)(a_re-I*a_im); } } } else alms[imp1][index]=(fcomplex)(a_re+I*a_im); } } } } //omp end for gsl_vector_free(rv1); gsl_vector_free(iv1); gsl_vector_free(rv2); gsl_vector_free(iv2); gsl_matrix_free(clmat); gsl_vector_free(eval); gsl_matrix_free(evec); gsl_eigen_symmv_free(wsym); end_rng(rng); gsl_interp_accel_free(intacc_cells); gsl_interp_accel_free(intacc_beam); } //omp end parallel //Restore error handler gsl_set_error_handler(geh); return alms; } static void read_key(fitsfile *fptr,int dtype,char *key,void *val,int *status) { fits_read_key(fptr,dtype,key,val,NULL,status); if(*status) report_error(NMT_ERROR_READ,"Key %s not found\n",key); } flouble *fs_read_flat_map(char *fname,int *nx,int *ny,flouble *lx,flouble *ly,int nfield) { fitsfile *fptr; int numhdu,hdutype,naxis,naxis1,naxis2; double cdelt1,cdelt2; flouble nulval=-999; int status=0; fits_open_file(&fptr,fname,READONLY,&status); if(status) report_error(NMT_ERROR_FOPEN,"Can't open file %s\n",fname); fits_get_num_hdus(fptr,&numhdu,&status); if(nfield>=numhdu) report_error(NMT_ERROR_READ,"%d-th field doesn't exist\n",nfield); fits_movabs_hdu(fptr,nfield+1,&hdutype,&status); if(hdutype!=IMAGE_HDU) 
report_error(NMT_ERROR_READ,"Requested HDU is not an image\n"); //Read patch properties read_key(fptr,TINT,"NAXIS",&naxis,&status); read_key(fptr,TINT,"NAXIS1",&naxis1,&status); read_key(fptr,TINT,"NAXIS2",&naxis2,&status); read_key(fptr,TDOUBLE,"CDELT1",&cdelt1,&status); read_key(fptr,TDOUBLE,"CDELT2",&cdelt2,&status); if(naxis!=2) report_error(NMT_ERROR_READ,"Can't find a two-dimensional map\n"); *nx=naxis1; *ny=naxis2; *lx=fabs(naxis1*cdelt1)*M_PI/180; *ly=fabs(naxis2*cdelt2)*M_PI/180; //Read data long fpixel[2]={1,1}; flouble *map_out=my_malloc(naxis1*naxis2*sizeof(double)); #ifdef _SPREC fits_read_pix(fptr,TFLOAT,fpixel,naxis1*naxis2,&nulval,map_out,NULL,&status); #else //_SPREC fits_read_pix(fptr,TDOUBLE,fpixel,naxis1*naxis2,&nulval,map_out,NULL,&status); #endif //_SPREC if(status) report_error(NMT_ERROR_READ,"Error reading image from file %s\n",fname); fits_close_file(fptr,&status); return map_out; }
GB_unop__exp2_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__exp2_fc64_fc64)
// op(A') function:  GB (_unop_tran__exp2_fc64_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = GB_cexp2 (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: complex base-2 exponential
#define GB_OP(z, x) \
    z = GB_cexp2 (x) ;

// casting (identity cast: A and C share the same type)
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = GB_cexp2 (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EXP2 || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__exp2_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_cexp2 (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries not in the bitmap
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_cexp2 (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__exp2_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template uses the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
convolution_5x5.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// 5x5 convolution, stride 1, fully-unrolled scalar implementation.
// NOTE(review): despite the _sse suffix, no SSE intrinsics are used here;
// this is the plain C++ fallback kernel.
// Processes two output rows per inner-loop pass to reuse loaded input rows.
static void conv5x5s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        // initialize the whole output channel with the bias; input-channel
        // contributions are then accumulated on top
        out.fill(bias0);

        for (int q=0; q<inch; q++)
        {
            float* outptr = out;
            float* outptr2 = outptr + outw;   // second output row of the pair

            const float* img0 = bottom_blob.channel(q);

            // 25 weights per (output channel, input channel) pair
            const float* kernel0 = kernel + p*inch*25 + q*25;

            // six consecutive input rows feed two output rows
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;
            const float* r3 = img0 + w*3;
            const float* r4 = img0 + w*4;
            const float* r5 = img0 + w*5;

            // the five kernel rows
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 5;
            const float* k2 = kernel0 + 10;
            const float* k3 = kernel0 + 15;
            const float* k4 = kernel0 + 20;

            int i = 0;

            // main loop: two output rows per iteration
            for (; i+1 < outh; i+=2)
            {
                int remain = outw;

                for (; remain>0; remain--)
                {
                    float sum = 0;    // output row i   : rows r0..r4 x k0..k4
                    float sum2 = 0;   // output row i+1 : rows r1..r5 x k0..k4

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r0[3] * k0[3];
                    sum += r0[4] * k0[4];

                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r1[3] * k1[3];
                    sum += r1[4] * k1[4];

                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    sum += r2[3] * k2[3];
                    sum += r2[4] * k2[4];

                    sum += r3[0] * k3[0];
                    sum += r3[1] * k3[1];
                    sum += r3[2] * k3[2];
                    sum += r3[3] * k3[3];
                    sum += r3[4] * k3[4];

                    sum += r4[0] * k4[0];
                    sum += r4[1] * k4[1];
                    sum += r4[2] * k4[2];
                    sum += r4[3] * k4[3];
                    sum += r4[4] * k4[4];

                    sum2 += r1[0] * k0[0];
                    sum2 += r1[1] * k0[1];
                    sum2 += r1[2] * k0[2];
                    sum2 += r1[3] * k0[3];
                    sum2 += r1[4] * k0[4];

                    sum2 += r2[0] * k1[0];
                    sum2 += r2[1] * k1[1];
                    sum2 += r2[2] * k1[2];
                    sum2 += r2[3] * k1[3];
                    sum2 += r2[4] * k1[4];

                    sum2 += r3[0] * k2[0];
                    sum2 += r3[1] * k2[1];
                    sum2 += r3[2] * k2[2];
                    sum2 += r3[3] * k2[3];
                    sum2 += r3[4] * k2[4];

                    sum2 += r4[0] * k3[0];
                    sum2 += r4[1] * k3[1];
                    sum2 += r4[2] * k3[2];
                    sum2 += r4[3] * k3[3];
                    sum2 += r4[4] * k3[4];

                    sum2 += r5[0] * k4[0];
                    sum2 += r5[1] * k4[1];
                    sum2 += r5[2] * k4[2];
                    sum2 += r5[3] * k4[3];
                    sum2 += r5[4] * k4[4];

                    *outptr += sum;
                    *outptr2 += sum2;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    r4++;
                    r5++;
                    outptr++;
                    outptr2++;
                }

                // skip the 4-pixel right border (w = outw+4 for a 5x5 kernel)
                // plus one extra row, since two rows were consumed
                r0 += 4 + w;
                r1 += 4 + w;
                r2 += 4 + w;
                r3 += 4 + w;
                r4 += 4 + w;
                r5 += 4 + w;

                outptr += outw;
                outptr2 += outw;
            }

            // remainder: last output row when outh is odd
            for (; i < outh; i++)
            {
                int remain = outw;

                for (; remain>0; remain--)
                {
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r0[3] * k0[3];
                    sum += r0[4] * k0[4];

                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r1[3] * k1[3];
                    sum += r1[4] * k1[4];

                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    sum += r2[3] * k2[3];
                    sum += r2[4] * k2[4];

                    sum += r3[0] * k3[0];
                    sum += r3[1] * k3[1];
                    sum += r3[2] * k3[2];
                    sum += r3[3] * k3[3];
                    sum += r3[4] * k3[4];

                    sum += r4[0] * k4[0];
                    sum += r4[1] * k4[1];
                    sum += r4[2] * k4[2];
                    sum += r4[3] * k4[3];
                    sum += r4[4] * k4[4];

                    *outptr += sum;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    r4++;
                    outptr++;
                }

                r0 += 4;
                r1 += 4;
                r2 += 4;
                r3 += 4;
                r4 += 4;
            }
        }
    }
}
GB_unop__minv_bool_bool.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__minv_bool_bool)
// op(A') function:  GB (_unop_tran__minv_bool_bool)

// C type:   bool
// A type:   bool
// cast:     ;
// unaryop:  cij = true

// NOTE: boolean MINV (multiplicative inverse) is the constant true, so the
// generator emits empty statements (";") for the load and cast macros —
// the input value is never read.

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    bool

// aij = Ax [pA] (not needed: operator ignores its input)
#define GB_GETA(aij,Ax,pA) \
    ;

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = true ;

// casting (empty: no value is carried)
#define GB_CAST(z, aij) \
    ; ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    ; ; \
    /* Cx [pC] = op (cast (aij)) */ \
    ; ; \
    Cx [pC] = true ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__minv_bool_bool)
(
    bool *Cx,                   // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every output entry is set to true
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            ; ;
            ; ;
            Cx [p] = true ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries not in the bitmap
            ; ;
            ; ;
            Cx [p] = true ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__minv_bool_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template uses the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__lnot_int64_int64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_int64_int64
// op(A') function:  GB_tran__lnot_int64_int64

// C type:   int64_t
// A type:   int64_t
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: logical NOT on an integer (result is 0 or 1)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, x) \
    int64_t z = (int64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_int64_int64
(
    int64_t *restrict Cx,
    const int64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_int64_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // second phase of the two-phase transpose template
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
implicit_blender.c
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) Blender Foundation
 * All rights reserved.
 */

/** \file
 * \ingroup bph
 */

#include "implicit.h"

#ifdef IMPLICIT_SOLVER_BLENDER

#  include "MEM_guardedalloc.h"

#  include "DNA_scene_types.h"
#  include "DNA_object_types.h"
#  include "DNA_object_force_types.h"
#  include "DNA_meshdata_types.h"
#  include "DNA_texture_types.h"

#  include "BLI_math.h"
#  include "BLI_utildefines.h"

#  include "BKE_cloth.h"
#  include "BKE_collision.h"
#  include "BKE_effect.h"

#  include "BPH_mass_spring.h"

#  ifdef __GNUC__
#    pragma GCC diagnostic ignored "-Wtype-limits"
#  endif

#  ifdef _OPENMP
#    define CLOTH_OPENMP_LIMIT 512 /* minimum element count before parallelizing */
#  endif

//#define DEBUG_TIME

#  ifdef DEBUG_TIME
#    include "PIL_time.h"
#  endif

/* 3x3 identity and zero matrices used as initializers throughout */
static float I[3][3] = {{1, 0, 0}, {0, 1, 0}, {0, 0, 1}};
static float ZERO[3][3] = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}};

#  if 0
#    define C99
#    ifdef C99
/* NOTE(review): "defineDO_INLINE" lacks a space after "define"; this whole
 * block is compiled out by "#if 0", so it never takes effect. */
#      defineDO_INLINE inline
#    else
#      defineDO_INLINE static
#    endif
#  endif /* if 0 */

struct Cloth;

//////////////////////////////////////////
/* fast vector / matrix library, enhancements are welcome :) -dg */
/////////////////////////////////////////

/* DEFINITIONS */
typedef float lfVector[3];
typedef struct fmatrix3x3 {
  float m[3][3];     /* 3x3 matrix */
  unsigned int c, r; /* column and row number */
  /* int pinned; // is this vertex allowed to move? */
  float n1, n2, n3;    /* three normal vectors for collision constrains */
  unsigned int vcount; /* vertex count */
  unsigned int scount; /* spring count */
} fmatrix3x3;

///////////////////////////
// float[3] vector
///////////////////////////
/* simple vector code */
/* STATUS: verified */

/* to = from * scalar (component-wise) */
DO_INLINE void mul_fvector_S(float to[3], float from[3], float scalar)
{
  to[0] = from[0] * scalar;
  to[1] = from[1] * scalar;
  to[2] = from[2] * scalar;
}

/* simple v^T * v product ("outer product") */
/* STATUS: HAS TO BE verified (*should* work) */
DO_INLINE void mul_fvectorT_fvector(float to[3][3], float vectorA[3], float vectorB[3])
{
  mul_fvector_S(to[0], vectorB, vectorA[0]);
  mul_fvector_S(to[1], vectorB, vectorA[1]);
  mul_fvector_S(to[2], vectorB, vectorA[2]);
}

/* simple v^T * v product with scalar ("outer product") */
/* STATUS: HAS TO BE verified (*should* work) */
DO_INLINE void mul_fvectorT_fvectorS(float to[3][3], float vectorA[3], float vectorB[3], float aS)
{
  mul_fvectorT_fvector(to, vectorA, vectorB);

  mul_fvector_S(to[0], to[0], aS);
  mul_fvector_S(to[1], to[1], aS);
  mul_fvector_S(to[2], to[2], aS);
}

#  if 0
/* printf vector[3] on console: for debug output */
static void print_fvector(float m3[3])
{
  printf("%f\n%f\n%f\n\n", m3[0], m3[1], m3[2]);
}

///////////////////////////
/* long float vector float (*)[3] */
///////////////////////////
/* print long vector on console: for debug output */
DO_INLINE void print_lfvector(float (*fLongVector)[3], unsigned int verts)
{
  unsigned int i = 0;
  for (i = 0; i < verts; i++) {
    print_fvector(fLongVector[i]);
  }
}
#  endif

/* create long vector (zero-initialized, verts entries of float[3]) */
DO_INLINE lfVector *create_lfvector(unsigned int verts)
{
  /* TODO: check if memory allocation was successful */
  return (lfVector *)MEM_callocN(verts * sizeof(lfVector), "cloth_implicit_alloc_vector");
  // return (lfVector *)cloth_aligned_malloc(&MEMORY_BASE, verts * sizeof(lfVector));
}

/* delete long vector */
DO_INLINE void del_lfvector(float (*fLongVector)[3])
{
  if (fLongVector != NULL) {
    MEM_freeN(fLongVector);
    // cloth_aligned_free(&MEMORY_BASE, fLongVector);
  }
}

/* copy long vector */
DO_INLINE void cp_lfvector(float (*to)[3], float (*from)[3], unsigned int verts)
{
  memcpy(to, from, verts * sizeof(lfVector));
}

/* init long vector with float[3] */
DO_INLINE void init_lfvector(float (*fLongVector)[3], float vector[3], unsigned int verts)
{
  unsigned int i = 0;
  for (i = 0; i < verts; i++) {
    copy_v3_v3(fLongVector[i], vector);
  }
}

/* zero long vector with float[3] */
DO_INLINE void zero_lfvector(float (*to)[3], unsigned int verts)
{
  /* NOTE(review): the 0.0f literal is implicitly converted to the int 0 that
   * memset expects; works, but the literal is misleading. */
  memset(to, 0.0f, verts * sizeof(lfVector));
}

/* multiply long vector with scalar*/
DO_INLINE void mul_lfvectorS(float (*to)[3],
                             float (*fLongVector)[3],
                             float scalar,
                             unsigned int verts)
{
  unsigned int i = 0;

  for (i = 0; i < verts; i++) {
    mul_fvector_S(to[i], fLongVector[i], scalar);
  }
}

/* multiply long vector with scalar*/
/* A -= B * float */
DO_INLINE void submul_lfvectorS(float (*to)[3],
                                float (*fLongVector)[3],
                                float scalar,
                                unsigned int verts)
{
  unsigned int i = 0;
  for (i = 0; i < verts; i++) {
    VECSUBMUL(to[i], fLongVector[i], scalar);
  }
}

/* dot product for big vector */
DO_INLINE float dot_lfvector(float (*fLongVectorA)[3], float (*fLongVectorB)[3], unsigned int verts)
{
  long i = 0;
  float temp = 0.0;
  // XXX brecht, disabled this for now (first schedule line was already disabled),
  // due to non-commutative nature of floating point ops this makes the sim give
  // different results each time you run it!
  // schedule(guided, 2)
  //#pragma omp parallel for reduction(+: temp) if (verts > CLOTH_OPENMP_LIMIT)
  for (i = 0; i < (long)verts; i++) {
    temp += dot_v3v3(fLongVectorA[i], fLongVectorB[i]);
  }
  return temp;
}

/* A = B + C  --> for big vector */
DO_INLINE void add_lfvector_lfvector(float (*to)[3],
                                     float (*fLongVectorA)[3],
                                     float (*fLongVectorB)[3],
                                     unsigned int verts)
{
  unsigned int i = 0;

  for (i = 0; i < verts; i++) {
    add_v3_v3v3(to[i], fLongVectorA[i], fLongVectorB[i]);
  }
}

/* A = B + C * float --> for big vector */
DO_INLINE void add_lfvector_lfvectorS(float (*to)[3],
                                      float (*fLongVectorA)[3],
                                      float (*fLongVectorB)[3],
                                      float bS,
                                      unsigned int verts)
{
  unsigned int i = 0;

  for (i = 0; i < verts; i++) {
    VECADDS(to[i], fLongVectorA[i], fLongVectorB[i], bS);
  }
}

/* A = B * float + C * float --> for big vector */
DO_INLINE void add_lfvectorS_lfvectorS(float (*to)[3],
                                       float (*fLongVectorA)[3],
                                       float aS,
                                       float (*fLongVectorB)[3],
                                       float bS,
                                       unsigned int verts)
{
  unsigned int i = 0;

  for (i = 0; i < verts; i++) {
    VECADDSS(to[i], fLongVectorA[i], aS, fLongVectorB[i], bS);
  }
}

/* A = B - C * float --> for big vector */
DO_INLINE void sub_lfvector_lfvectorS(float (*to)[3],
                                      float (*fLongVectorA)[3],
                                      float (*fLongVectorB)[3],
                                      float bS,
                                      unsigned int verts)
{
  unsigned int i = 0;
  for (i = 0; i < verts; i++) {
    VECSUBS(to[i], fLongVectorA[i], fLongVectorB[i], bS);
  }
}

/* A = B - C --> for big vector */
DO_INLINE void sub_lfvector_lfvector(float (*to)[3],
                                     float (*fLongVectorA)[3],
                                     float (*fLongVectorB)[3],
                                     unsigned int verts)
{
  unsigned int i = 0;

  for (i = 0; i < verts; i++) {
    sub_v3_v3v3(to[i], fLongVectorA[i], fLongVectorB[i]);
  }
}

///////////////////////////
// 3x3 matrix
///////////////////////////
#  if 0
/* printf 3x3 matrix on console: for debug output */
static void print_fmatrix(float m3[3][3])
{
  printf("%f\t%f\t%f\n", m3[0][0], m3[0][1], m3[0][2]);
  printf("%f\t%f\t%f\n", m3[1][0], m3[1][1], m3[1][2]);
  printf("%f\t%f\t%f\n\n", m3[2][0], m3[2][1], m3[2][2]);
}

static void print_sparse_matrix(fmatrix3x3 *m)
{
  if (m) {
    unsigned int i;
    for (i = 0; i < m[0].vcount + m[0].scount; i++) {
      printf("%d:\n", i);
      print_fmatrix(m[i].m);
    }
  }
}
#  endif

#  if 0
static void print_lvector(lfVector *v, int numverts)
{
  int i;
  for (i = 0; i < numverts; i++) {
    if (i > 0) {
      printf("\n");
    }

    printf("%f,\n", v[i][0]);
    printf("%f,\n", v[i][1]);
    printf("%f,\n", v[i][2]);
  }
}
#  endif

#  if 0
/* dump the whole sparse block matrix as a dense matrix (debug only) */
static void print_bfmatrix(fmatrix3x3 *m)
{
  int tot = m[0].vcount + m[0].scount;
  int size = m[0].vcount * 3;
  float *t = MEM_callocN(sizeof(float) * size * size, "bfmatrix");
  int q, i, j;

  for (q = 0; q < tot; q++) {
    int k = 3 * m[q].r;
    int l = 3 * m[q].c;

    for (j = 0; j < 3; j++) {
      for (i = 0; i < 3; i++) {
        // if (t[k + i + (l + j) * size] != 0.0f) {
        //   printf("warning: overwriting value at %d, %d\n", m[q].r, m[q].c);
        // }
        if (k == l) {
          t[k + i + (k + j) * size] += m[q].m[i][j];
        }
        else {
          t[k + i + (l + j) * size] += m[q].m[i][j];
          t[l + j + (k + i) * size] += m[q].m[j][i];
        }
      }
    }
  }

  for (j = 0; j < size; j++) {
    if (j > 0 && j % 3 == 0) {
      printf("\n");
    }

    for (i = 0; i < size; i++) {
      if (i > 0 && i % 3 == 0) {
        printf("  ");
      }

      implicit_print_matrix_elem(t[i + j * size]);
    }
    printf("\n");
  }

  MEM_freeN(t);
}
#  endif

/* copy 3x3 matrix */
DO_INLINE void cp_fmatrix(float to[3][3], float from[3][3])
{
  // memcpy(to, from, sizeof(float) * 9);
  copy_v3_v3(to[0], from[0]);
  copy_v3_v3(to[1], from[1]);
  copy_v3_v3(to[2], from[2]);
}

/* copy 3x3 matrix */
/* (sets to = aS * identity) */
DO_INLINE void initdiag_fmatrixS(float to[3][3], float aS)
{
  cp_fmatrix(to, ZERO);

  to[0][0] = aS;
  to[1][1] = aS;
  to[2][2] = aS;
}

#  if 0
/* calculate determinant of 3x3 matrix */
DO_INLINE float det_fmatrix(float m[3][3])
{
  return m[0][0] * m[1][1] * m[2][2] + m[1][0] * m[2][1] * m[0][2] +
         m[0][1] * m[1][2] * m[2][0] - m[0][0] * m[1][2] * m[2][1] -
         m[0][1] * m[1][0] * m[2][2] - m[2][0] * m[1][1] * m[0][2];
}

DO_INLINE void inverse_fmatrix(float to[3][3], float from[3][3])
{
  unsigned int i, j;
  float d;

  if ((d = det_fmatrix(from)) == 0) {
    printf("can't build inverse");
    exit(0);
  }
  for (i = 0; i < 3; i++) {
    for (j = 0; j < 3; j++) {
      int i1 = (i + 1) % 3;
      int i2 = (i + 2) % 3;
      int j1 = (j + 1) % 3;
      int j2 = (j + 2) % 3;
      /** Reverse indexes i&j to take transpose. */
      to[j][i] = (from[i1][j1] * from[i2][j2] - from[i1][j2] * from[i2][j1]) / d;
      /**
       * <pre>
       * if (i == j) {
       *   to[i][j] = 1.0f / from[i][j];
       * }
       * else {
       *   to[i][j] = 0;
       * }
       * </pre>
       */
    }
  }
}
#  endif

/* 3x3 matrix multiplied by a scalar */
/* STATUS: verified */
DO_INLINE void mul_fmatrix_S(float matrix[3][3], float scalar)
{
  mul_fvector_S(matrix[0], matrix[0], scalar);
  mul_fvector_S(matrix[1], matrix[1], scalar);
  mul_fvector_S(matrix[2], matrix[2], scalar);
}

/* a vector multiplied by a 3x3 matrix */
/* STATUS: verified */
/* (to = from * matrix, i.e. matrix^T applied to from) */
DO_INLINE void mul_fvector_fmatrix(float *to, float *from, float matrix[3][3])
{
  to[0] = matrix[0][0] * from[0] + matrix[1][0] * from[1] + matrix[2][0] * from[2];
  to[1] = matrix[0][1] * from[0] + matrix[1][1] * from[1] + matrix[2][1] * from[2];
  to[2] = matrix[0][2] * from[0] + matrix[1][2] * from[1] + matrix[2][2] * from[2];
}

/* 3x3 matrix multiplied by a vector */
/* STATUS: verified */
DO_INLINE void mul_fmatrix_fvector(float *to, float matrix[3][3], float from[3])
{
  to[0] = dot_v3v3(matrix[0], from);
  to[1] = dot_v3v3(matrix[1], from);
  to[2] = dot_v3v3(matrix[2], from);
}

/* 3x3 matrix addition with 3x3 matrix */
DO_INLINE void add_fmatrix_fmatrix(float to[3][3], float matrixA[3][3], float matrixB[3][3])
{
  add_v3_v3v3(to[0], matrixA[0], matrixB[0]);
  add_v3_v3v3(to[1], matrixA[1], matrixB[1]);
  add_v3_v3v3(to[2], matrixA[2], matrixB[2]);
}

/* A -= B*x + C*y (3x3 matrix sub-addition with 3x3 matrix) */
DO_INLINE void subadd_fmatrixS_fmatrixS(
    float to[3][3], float matrixA[3][3], float aS, float matrixB[3][3], float bS)
{
  VECSUBADDSS(to[0], matrixA[0], aS, matrixB[0], bS);
  VECSUBADDSS(to[1], matrixA[1], aS, matrixB[1], bS);
  VECSUBADDSS(to[2], matrixA[2], aS, matrixB[2], bS);
}

/* A = B - C (3x3 matrix subtraction with 3x3 matrix) */
DO_INLINE void sub_fmatrix_fmatrix(float to[3][3], float matrixA[3][3], float matrixB[3][3])
{
  sub_v3_v3v3(to[0], matrixA[0], matrixB[0]);
  sub_v3_v3v3(to[1], matrixA[1], matrixB[1]);
  sub_v3_v3v3(to[2], matrixA[2], matrixB[2]);
}

/////////////////////////////////////////////////////////////////
// special functions
/////////////////////////////////////////////////////////////////

/* 3x3 matrix multiplied+added by a vector */
/* STATUS: verified */
DO_INLINE void muladd_fmatrix_fvector(float to[3], float matrix[3][3], float from[3])
{
  to[0] += dot_v3v3(matrix[0], from);
  to[1] += dot_v3v3(matrix[1], from);
  to[2] += dot_v3v3(matrix[2], from);
}

/* to += matrix^T * from */
DO_INLINE void muladd_fmatrixT_fvector(float to[3], float matrix[3][3], float from[3])
{
  to[0] += matrix[0][0] * from[0] + matrix[1][0] * from[1] + matrix[2][0] * from[2];
  to[1] += matrix[0][1] * from[0] + matrix[1][1] * from[1] + matrix[2][1] * from[2];
  to[2] += matrix[0][2] * from[0] + matrix[1][2] * from[1] + matrix[2][2] * from[2];
}

/* r = a * b^T (outer product) */
BLI_INLINE void outerproduct(float r[3][3], const float a[3], const float b[3])
{
  mul_v3_v3fl(r[0], a, b[0]);
  mul_v3_v3fl(r[1], a, b[1]);
  mul_v3_v3fl(r[2], a, b[2]);
}

/* r = columns of m crossed with v */
BLI_INLINE void cross_m3_v3m3(float r[3][3], const float v[3], float m[3][3])
{
  cross_v3_v3v3(r[0], v, m[0]);
  cross_v3_v3v3(r[1], v, m[1]);
  cross_v3_v3v3(r[2], v, m[2]);
}

/* r = skew-symmetric cross-product matrix of v (r*x == v x x) */
BLI_INLINE void cross_v3_identity(float r[3][3], const float v[3])
{
  r[0][0] = 0.0f;
  r[1][0] = v[2];
  r[2][0] = -v[1];
  r[0][1] = -v[2];
  r[1][1] = 0.0f;
  r[2][1] = v[0];
  r[0][2] = v[1];
  r[1][2] = -v[0];
  r[2][2] = 0.0f;
}

/* r += m * f (component-wise multiply-add) */
BLI_INLINE void madd_m3_m3fl(float r[3][3], float m[3][3], float f)
{
  r[0][0] += m[0][0] * f;
  r[0][1] += m[0][1] * f;
  r[0][2] += m[0][2] * f;
  r[1][0] += m[1][0] * f;
  r[1][1] += m[1][1] * f;
  r[1][2] += m[1][2] * f;
  r[2][0] += m[2][0] * f;
  r[2][1] += m[2][1] * f;
  r[2][2] += m[2][2] * f;
}

/////////////////////////////////////////////////////////////////

///////////////////////////
// SPARSE SYMMETRIC big matrix with 3x3 matrix entries
///////////////////////////
/* printf a big matrix on console: for debug output */
#  if 0
static void print_bfmatrix(fmatrix3x3 *m3)
{
  unsigned int i = 0;

  for (i = 0; i < m3[0].vcount + m3[0].scount; i++) {
    print_fmatrix(m3[i].m);
  }
}
#  endif

/* set the (row, column) block coordinates of one 3x3 entry */
BLI_INLINE void init_fmatrix(fmatrix3x3 *matrix, int r, int c)
{
  matrix->r = r;
  matrix->c = c;
}

/* create big matrix */
/* layout: verts diagonal blocks first, then springs off-diagonal blocks;
 * entry [0] also carries the vcount/scount bookkeeping */
DO_INLINE fmatrix3x3 *create_bfmatrix(unsigned int verts, unsigned int springs)
{
  // TODO: check if memory allocation was successful */
  fmatrix3x3 *temp = (fmatrix3x3 *)MEM_callocN(sizeof(fmatrix3x3) * (verts + springs),
                                               "cloth_implicit_alloc_matrix");
  int i;

  temp[0].vcount = verts;
  temp[0].scount = springs;

  /* vertex part of the matrix is diagonal blocks */
  for (i = 0; i < verts; i++) {
    init_fmatrix(temp + i, i, i);
  }

  return temp;
}

/* delete big matrix */
DO_INLINE void del_bfmatrix(fmatrix3x3 *matrix)
{
  if (matrix != NULL) {
    MEM_freeN(matrix);
  }
}

/* copy big matrix */
DO_INLINE void cp_bfmatrix(fmatrix3x3 *to, fmatrix3x3 *from)
{
  // TODO bounds checking
  memcpy(to, from, sizeof(fmatrix3x3) * (from[0].vcount + from[0].scount));
}

/* init big matrix */
// slow in parallel
DO_INLINE void init_bfmatrix(fmatrix3x3 *matrix, float m3[3][3])
{
  unsigned int i;

  for (i = 0; i < matrix[0].vcount + matrix[0].scount; i++) {
    cp_fmatrix(matrix[i].m, m3);
  }
}

/* init the diagonal of big matrix */
// slow in parallel
DO_INLINE void initdiag_bfmatrix(fmatrix3x3 *matrix, float m3[3][3])
{
  unsigned int i, j;
  float tmatrix[3][3] = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}};

  for (i = 0; i < matrix[0].vcount; i++) {
    cp_fmatrix(matrix[i].m, m3);
  }
  for (j = matrix[0].vcount; j < matrix[0].vcount + matrix[0].scount; j++) {
    cp_fmatrix(matrix[j].m, tmatrix);
  }
}

/* SPARSE SYMMETRIC multiply big matrix with long vector*/
/* STATUS: verified */
/* (definition continues beyond this chunk) */
DO_INLINE void mul_bfmatrix_lfvector(float (*to)[3], fmatrix3x3 *from, lfVector *fLongVector)
{
  unsigned int vcount = from[0].vcount;
  lfVector *temp = create_lfvector(vcount);

  zero_lfvector(to, vcount);
# pragma omp parallel sections if (vcount > CLOTH_OPENMP_LIMIT) { # pragma omp section { for (unsigned int i = from[0].vcount; i < from[0].vcount + from[0].scount; i++) { /* This is the lower triangle of the sparse matrix, * therefore multiplication occurs with transposed submatrices. */ muladd_fmatrixT_fvector(to[from[i].c], from[i].m, fLongVector[from[i].r]); } } # pragma omp section { for (unsigned int i = 0; i < from[0].vcount + from[0].scount; i++) { muladd_fmatrix_fvector(temp[from[i].r], from[i].m, fLongVector[from[i].c]); } } } add_lfvector_lfvector(to, to, temp, from[0].vcount); del_lfvector(temp); } /* SPARSE SYMMETRIC sub big matrix with big matrix*/ /* A -= B * float + C * float --> for big matrix */ /* VERIFIED */ DO_INLINE void subadd_bfmatrixS_bfmatrixS( fmatrix3x3 *to, fmatrix3x3 *from, float aS, fmatrix3x3 *matrix, float bS) { unsigned int i = 0; /* process diagonal elements */ for (i = 0; i < matrix[0].vcount + matrix[0].scount; i++) { subadd_fmatrixS_fmatrixS(to[i].m, from[i].m, aS, matrix[i].m, bS); } } /////////////////////////////////////////////////////////////////// // simulator start /////////////////////////////////////////////////////////////////// typedef struct Implicit_Data { /* inputs */ fmatrix3x3 *bigI; /* identity (constant) */ fmatrix3x3 *tfm; /* local coordinate transform */ fmatrix3x3 *M; /* masses */ lfVector *F; /* forces */ fmatrix3x3 *dFdV, *dFdX; /* force jacobians */ int num_blocks; /* number of off-diagonal blocks (springs) */ /* motion state data */ lfVector *X, *Xnew; /* positions */ lfVector *V, *Vnew; /* velocities */ /* internal solver data */ lfVector *B; /* B for A*dV = B */ fmatrix3x3 *A; /* A for A*dV = B */ lfVector *dV; /* velocity change (solution of A*dV = B) */ lfVector *z; /* target velocity in constrained directions */ fmatrix3x3 *S; /* filtering matrix for constraints */ fmatrix3x3 *P, *Pinv; /* pre-conditioning matrix */ } Implicit_Data; Implicit_Data *BPH_mass_spring_solver_create(int numverts, int 
numsprings) { Implicit_Data *id = (Implicit_Data *)MEM_callocN(sizeof(Implicit_Data), "implicit vecmat"); /* process diagonal elements */ id->tfm = create_bfmatrix(numverts, 0); id->A = create_bfmatrix(numverts, numsprings); id->dFdV = create_bfmatrix(numverts, numsprings); id->dFdX = create_bfmatrix(numverts, numsprings); id->S = create_bfmatrix(numverts, 0); id->Pinv = create_bfmatrix(numverts, numsprings); id->P = create_bfmatrix(numverts, numsprings); id->bigI = create_bfmatrix(numverts, numsprings); // TODO 0 springs id->M = create_bfmatrix(numverts, numsprings); id->X = create_lfvector(numverts); id->Xnew = create_lfvector(numverts); id->V = create_lfvector(numverts); id->Vnew = create_lfvector(numverts); id->F = create_lfvector(numverts); id->B = create_lfvector(numverts); id->dV = create_lfvector(numverts); id->z = create_lfvector(numverts); initdiag_bfmatrix(id->bigI, I); return id; } void BPH_mass_spring_solver_free(Implicit_Data *id) { del_bfmatrix(id->tfm); del_bfmatrix(id->A); del_bfmatrix(id->dFdV); del_bfmatrix(id->dFdX); del_bfmatrix(id->S); del_bfmatrix(id->P); del_bfmatrix(id->Pinv); del_bfmatrix(id->bigI); del_bfmatrix(id->M); del_lfvector(id->X); del_lfvector(id->Xnew); del_lfvector(id->V); del_lfvector(id->Vnew); del_lfvector(id->F); del_lfvector(id->B); del_lfvector(id->dV); del_lfvector(id->z); MEM_freeN(id); } /* ==== Transformation from/to root reference frames ==== */ BLI_INLINE void world_to_root_v3(Implicit_Data *data, int index, float r[3], const float v[3]) { copy_v3_v3(r, v); mul_transposed_m3_v3(data->tfm[index].m, r); } BLI_INLINE void root_to_world_v3(Implicit_Data *data, int index, float r[3], const float v[3]) { mul_v3_m3v3(r, data->tfm[index].m, v); } BLI_INLINE void world_to_root_m3(Implicit_Data *data, int index, float r[3][3], float m[3][3]) { float trot[3][3]; copy_m3_m3(trot, data->tfm[index].m); transpose_m3(trot); mul_m3_m3m3(r, trot, m); } BLI_INLINE void root_to_world_m3(Implicit_Data *data, int index, float r[3][3], 
float m[3][3]) { mul_m3_m3m3(r, data->tfm[index].m, m); } /* ================================ */ DO_INLINE void filter(lfVector *V, fmatrix3x3 *S) { unsigned int i = 0; for (i = 0; i < S[0].vcount; i++) { mul_m3_v3(S[i].m, V[S[i].r]); } } /* this version of the CG algorithm does not work very well with partial constraints * (where S has non-zero elements). */ # if 0 static int cg_filtered(lfVector *ldV, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S) { // Solves for unknown X in equation AX=B unsigned int conjgrad_loopcount = 0, conjgrad_looplimit = 100; float conjgrad_epsilon = 0.0001f /* , conjgrad_lasterror=0 */ /* UNUSED */; lfVector *q, *d, *tmp, *r; float s, starget, a, s_prev; unsigned int numverts = lA[0].vcount; q = create_lfvector(numverts); d = create_lfvector(numverts); tmp = create_lfvector(numverts); r = create_lfvector(numverts); // zero_lfvector(ldV, CLOTHPARTICLES); filter(ldV, S); add_lfvector_lfvector(ldV, ldV, z, numverts); // r = B - Mul(tmp, A, X); // just use B if X known to be zero cp_lfvector(r, lB, numverts); mul_bfmatrix_lfvector(tmp, lA, ldV); sub_lfvector_lfvector(r, r, tmp, numverts); filter(r, S); cp_lfvector(d, r, numverts); s = dot_lfvector(r, r, numverts); starget = s * sqrtf(conjgrad_epsilon); while (s > starget && conjgrad_loopcount < conjgrad_looplimit) { // Mul(q, A, d); // q = A*d; mul_bfmatrix_lfvector(q, lA, d); filter(q, S); a = s / dot_lfvector(d, q, numverts); // X = X + d*a; add_lfvector_lfvectorS(ldV, ldV, d, a, numverts); // r = r - q*a; sub_lfvector_lfvectorS(r, r, q, a, numverts); s_prev = s; s = dot_lfvector(r, r, numverts); //d = r+d*(s/s_prev); add_lfvector_lfvectorS(d, r, d, (s / s_prev), numverts); filter(d, S); conjgrad_loopcount++; } /* conjgrad_lasterror = s; */ /* UNUSED */ del_lfvector(q); del_lfvector(d); del_lfvector(tmp); del_lfvector(r); // printf("W/O conjgrad_loopcount: %d\n", conjgrad_loopcount); return conjgrad_loopcount < conjgrad_looplimit; // true means we reached desired accuracy in 
given time - ie stable } # endif static int cg_filtered(lfVector *ldV, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S, ImplicitSolverResult *result) { // Solves for unknown X in equation AX=B unsigned int conjgrad_loopcount = 0, conjgrad_looplimit = 100; float conjgrad_epsilon = 0.01f; unsigned int numverts = lA[0].vcount; lfVector *fB = create_lfvector(numverts); lfVector *AdV = create_lfvector(numverts); lfVector *r = create_lfvector(numverts); lfVector *c = create_lfvector(numverts); lfVector *q = create_lfvector(numverts); lfVector *s = create_lfvector(numverts); float bnorm2, delta_new, delta_old, delta_target, alpha; cp_lfvector(ldV, z, numverts); /* d0 = filter(B)^T * P * filter(B) */ cp_lfvector(fB, lB, numverts); filter(fB, S); bnorm2 = dot_lfvector(fB, fB, numverts); delta_target = conjgrad_epsilon * conjgrad_epsilon * bnorm2; /* r = filter(B - A * dV) */ mul_bfmatrix_lfvector(AdV, lA, ldV); sub_lfvector_lfvector(r, lB, AdV, numverts); filter(r, S); /* c = filter(P^-1 * r) */ cp_lfvector(c, r, numverts); filter(c, S); /* delta = r^T * c */ delta_new = dot_lfvector(r, c, numverts); # ifdef IMPLICIT_PRINT_SOLVER_INPUT_OUTPUT printf("==== A ====\n"); print_bfmatrix(lA); printf("==== z ====\n"); print_lvector(z, numverts); printf("==== B ====\n"); print_lvector(lB, numverts); printf("==== S ====\n"); print_bfmatrix(S); # endif while (delta_new > delta_target && conjgrad_loopcount < conjgrad_looplimit) { mul_bfmatrix_lfvector(q, lA, c); filter(q, S); alpha = delta_new / dot_lfvector(c, q, numverts); add_lfvector_lfvectorS(ldV, ldV, c, alpha, numverts); add_lfvector_lfvectorS(r, r, q, -alpha, numverts); /* s = P^-1 * r */ cp_lfvector(s, r, numverts); delta_old = delta_new; delta_new = dot_lfvector(r, s, numverts); add_lfvector_lfvectorS(c, s, c, delta_new / delta_old, numverts); filter(c, S); conjgrad_loopcount++; } # ifdef IMPLICIT_PRINT_SOLVER_INPUT_OUTPUT printf("==== dV ====\n"); print_lvector(ldV, numverts); printf("========\n"); # endif 
del_lfvector(fB); del_lfvector(AdV); del_lfvector(r); del_lfvector(c); del_lfvector(q); del_lfvector(s); // printf("W/O conjgrad_loopcount: %d\n", conjgrad_loopcount); result->status = conjgrad_loopcount < conjgrad_looplimit ? BPH_SOLVER_SUCCESS : BPH_SOLVER_NO_CONVERGENCE; result->iterations = conjgrad_loopcount; result->error = bnorm2 > 0.0f ? sqrtf(delta_new / bnorm2) : 0.0f; return conjgrad_loopcount < conjgrad_looplimit; // true means we reached desired accuracy in given time - ie stable } # if 0 // block diagonalizer DO_INLINE void BuildPPinv(fmatrix3x3 *lA, fmatrix3x3 *P, fmatrix3x3 *Pinv) { unsigned int i = 0; // Take only the diagonal blocks of A // #pragma omp parallel for private(i) if (lA[0].vcount > CLOTH_OPENMP_LIMIT) for (i = 0; i < lA[0].vcount; i++) { // block diagonalizer cp_fmatrix(P[i].m, lA[i].m); inverse_fmatrix(Pinv[i].m, P[i].m); } } # if 0 // version 1.3 static int cg_filtered_pre(lfVector *dv, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S, fmatrix3x3 *P, fmatrix3x3 *Pinv) { unsigned int numverts = lA[0].vcount, iterations = 0, conjgrad_looplimit = 100; float delta0 = 0, deltaNew = 0, deltaOld = 0, alpha = 0; float conjgrad_epsilon = 0.0001; // 0.2 is dt for steps=5 lfVector *r = create_lfvector(numverts); lfVector *p = create_lfvector(numverts); lfVector *s = create_lfvector(numverts); lfVector *h = create_lfvector(numverts); BuildPPinv(lA, P, Pinv); filter(dv, S); add_lfvector_lfvector(dv, dv, z, numverts); mul_bfmatrix_lfvector(r, lA, dv); sub_lfvector_lfvector(r, lB, r, numverts); filter(r, S); mul_prevfmatrix_lfvector(p, Pinv, r); filter(p, S); deltaNew = dot_lfvector(r, p, numverts); delta0 = deltaNew * sqrt(conjgrad_epsilon); # ifdef DEBUG_TIME double start = PIL_check_seconds_timer(); # endif while ((deltaNew > delta0) && (iterations < conjgrad_looplimit)) { iterations++; mul_bfmatrix_lfvector(s, lA, p); filter(s, S); alpha = deltaNew / dot_lfvector(p, s, numverts); add_lfvector_lfvectorS(dv, dv, p, alpha, numverts); 
add_lfvector_lfvectorS(r, r, s, -alpha, numverts); mul_prevfmatrix_lfvector(h, Pinv, r); filter(h, S); deltaOld = deltaNew; deltaNew = dot_lfvector(r, h, numverts); add_lfvector_lfvectorS(p, h, p, deltaNew / deltaOld, numverts); filter(p, S); } # ifdef DEBUG_TIME double end = PIL_check_seconds_timer(); printf("cg_filtered_pre time: %f\n", (float)(end - start)); # endif del_lfvector(h); del_lfvector(s); del_lfvector(p); del_lfvector(r); printf("iterations: %d\n", iterations); return iterations < conjgrad_looplimit; } # endif // version 1.4 static int cg_filtered_pre(lfVector *dv, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S, fmatrix3x3 *P, fmatrix3x3 *Pinv, fmatrix3x3 *bigI) { unsigned int numverts = lA[0].vcount, iterations = 0, conjgrad_looplimit = 100; float delta0 = 0, deltaNew = 0, deltaOld = 0, alpha = 0, tol = 0; lfVector *r = create_lfvector(numverts); lfVector *p = create_lfvector(numverts); lfVector *s = create_lfvector(numverts); lfVector *h = create_lfvector(numverts); lfVector *bhat = create_lfvector(numverts); lfVector *btemp = create_lfvector(numverts); BuildPPinv(lA, P, Pinv); initdiag_bfmatrix(bigI, I); sub_bfmatrix_Smatrix(bigI, bigI, S); // x = Sx_0+(I-S)z filter(dv, S); add_lfvector_lfvector(dv, dv, z, numverts); // b_hat = S(b-A(I-S)z) mul_bfmatrix_lfvector(r, lA, z); mul_bfmatrix_lfvector(bhat, bigI, r); sub_lfvector_lfvector(bhat, lB, bhat, numverts); // r = S(b-Ax) mul_bfmatrix_lfvector(r, lA, dv); sub_lfvector_lfvector(r, lB, r, numverts); filter(r, S); // p = SP^-1r mul_prevfmatrix_lfvector(p, Pinv, r); filter(p, S); // delta0 = bhat^TP^-1bhat mul_prevfmatrix_lfvector(btemp, Pinv, bhat); delta0 = dot_lfvector(bhat, btemp, numverts); // deltaNew = r^TP deltaNew = dot_lfvector(r, p, numverts); # if 0 filter(dv, S); add_lfvector_lfvector(dv, dv, z, numverts); mul_bfmatrix_lfvector(r, lA, dv); sub_lfvector_lfvector(r, lB, r, numverts); filter(r, S); mul_prevfmatrix_lfvector(p, Pinv, r); filter(p, S); deltaNew = dot_lfvector(r, p, 
numverts); delta0 = deltaNew * sqrt(conjgrad_epsilon); # endif # ifdef DEBUG_TIME double start = PIL_check_seconds_timer(); # endif tol = (0.01 * 0.2); while ((deltaNew > delta0 * tol * tol) && (iterations < conjgrad_looplimit)) { iterations++; mul_bfmatrix_lfvector(s, lA, p); filter(s, S); alpha = deltaNew / dot_lfvector(p, s, numverts); add_lfvector_lfvectorS(dv, dv, p, alpha, numverts); add_lfvector_lfvectorS(r, r, s, -alpha, numverts); mul_prevfmatrix_lfvector(h, Pinv, r); filter(h, S); deltaOld = deltaNew; deltaNew = dot_lfvector(r, h, numverts); add_lfvector_lfvectorS(p, h, p, deltaNew / deltaOld, numverts); filter(p, S); } # ifdef DEBUG_TIME double end = PIL_check_seconds_timer(); printf("cg_filtered_pre time: %f\n", (float)(end - start)); # endif del_lfvector(btemp); del_lfvector(bhat); del_lfvector(h); del_lfvector(s); del_lfvector(p); del_lfvector(r); // printf("iterations: %d\n", iterations); return iterations < conjgrad_looplimit; } # endif bool BPH_mass_spring_solve_velocities(Implicit_Data *data, float dt, ImplicitSolverResult *result) { unsigned int numverts = data->dFdV[0].vcount; lfVector *dFdXmV = create_lfvector(numverts); zero_lfvector(data->dV, numverts); cp_bfmatrix(data->A, data->M); subadd_bfmatrixS_bfmatrixS(data->A, data->dFdV, dt, data->dFdX, (dt * dt)); mul_bfmatrix_lfvector(dFdXmV, data->dFdX, data->V); add_lfvectorS_lfvectorS(data->B, data->F, dt, dFdXmV, (dt * dt), numverts); # ifdef DEBUG_TIME double start = PIL_check_seconds_timer(); # endif /* Conjugate gradient algorithm to solve Ax=b. 
*/ cg_filtered(data->dV, data->A, data->B, data->z, data->S, result); // cg_filtered_pre(id->dV, id->A, id->B, id->z, id->S, id->P, id->Pinv, id->bigI); # ifdef DEBUG_TIME double end = PIL_check_seconds_timer(); printf("cg_filtered calc time: %f\n", (float)(end - start)); # endif // advance velocities add_lfvector_lfvector(data->Vnew, data->V, data->dV, numverts); del_lfvector(dFdXmV); return result->status == BPH_SOLVER_SUCCESS; } bool BPH_mass_spring_solve_positions(Implicit_Data *data, float dt) { int numverts = data->M[0].vcount; // advance positions add_lfvector_lfvectorS(data->Xnew, data->X, data->Vnew, dt, numverts); return true; } void BPH_mass_spring_apply_result(Implicit_Data *data) { int numverts = data->M[0].vcount; cp_lfvector(data->X, data->Xnew, numverts); cp_lfvector(data->V, data->Vnew, numverts); } void BPH_mass_spring_set_vertex_mass(Implicit_Data *data, int index, float mass) { unit_m3(data->M[index].m); mul_m3_fl(data->M[index].m, mass); } void BPH_mass_spring_set_rest_transform(Implicit_Data *data, int index, float tfm[3][3]) { # ifdef CLOTH_ROOT_FRAME copy_m3_m3(data->tfm[index].m, tfm); # else unit_m3(data->tfm[index].m); (void)tfm; # endif } void BPH_mass_spring_set_motion_state(Implicit_Data *data, int index, const float x[3], const float v[3]) { world_to_root_v3(data, index, data->X[index], x); world_to_root_v3(data, index, data->V[index], v); } void BPH_mass_spring_set_position(Implicit_Data *data, int index, const float x[3]) { world_to_root_v3(data, index, data->X[index], x); } void BPH_mass_spring_set_velocity(Implicit_Data *data, int index, const float v[3]) { world_to_root_v3(data, index, data->V[index], v); } void BPH_mass_spring_get_motion_state(struct Implicit_Data *data, int index, float x[3], float v[3]) { if (x) { root_to_world_v3(data, index, x, data->X[index]); } if (v) { root_to_world_v3(data, index, v, data->V[index]); } } void BPH_mass_spring_get_position(struct Implicit_Data *data, int index, float x[3]) { 
root_to_world_v3(data, index, x, data->X[index]); } void BPH_mass_spring_get_new_position(struct Implicit_Data *data, int index, float x[3]) { root_to_world_v3(data, index, x, data->Xnew[index]); } void BPH_mass_spring_set_new_position(struct Implicit_Data *data, int index, const float x[3]) { world_to_root_v3(data, index, data->Xnew[index], x); } void BPH_mass_spring_get_new_velocity(struct Implicit_Data *data, int index, float v[3]) { root_to_world_v3(data, index, v, data->Vnew[index]); } void BPH_mass_spring_set_new_velocity(struct Implicit_Data *data, int index, const float v[3]) { world_to_root_v3(data, index, data->Vnew[index], v); } /* -------------------------------- */ static int BPH_mass_spring_add_block(Implicit_Data *data, int v1, int v2) { int s = data->M[0].vcount + data->num_blocks; /* index from array start */ BLI_assert(s < data->M[0].vcount + data->M[0].scount); ++data->num_blocks; /* tfm and S don't have spring entries (diagonal blocks only) */ init_fmatrix(data->bigI + s, v1, v2); init_fmatrix(data->M + s, v1, v2); init_fmatrix(data->dFdX + s, v1, v2); init_fmatrix(data->dFdV + s, v1, v2); init_fmatrix(data->A + s, v1, v2); init_fmatrix(data->P + s, v1, v2); init_fmatrix(data->Pinv + s, v1, v2); return s; } void BPH_mass_spring_clear_constraints(Implicit_Data *data) { int i, numverts = data->S[0].vcount; for (i = 0; i < numverts; i++) { unit_m3(data->S[i].m); zero_v3(data->z[i]); } } void BPH_mass_spring_add_constraint_ndof0(Implicit_Data *data, int index, const float dV[3]) { zero_m3(data->S[index].m); world_to_root_v3(data, index, data->z[index], dV); } void BPH_mass_spring_add_constraint_ndof1( Implicit_Data *data, int index, const float c1[3], const float c2[3], const float dV[3]) { float m[3][3], p[3], q[3], u[3], cmat[3][3]; world_to_root_v3(data, index, p, c1); mul_fvectorT_fvector(cmat, p, p); sub_m3_m3m3(m, I, cmat); world_to_root_v3(data, index, q, c2); mul_fvectorT_fvector(cmat, q, q); sub_m3_m3m3(m, m, cmat); /* XXX not sure but 
multiplication should work here */ copy_m3_m3(data->S[index].m, m); // mul_m3_m3m3(data->S[index].m, data->S[index].m, m); world_to_root_v3(data, index, u, dV); add_v3_v3(data->z[index], u); } void BPH_mass_spring_add_constraint_ndof2(Implicit_Data *data, int index, const float c1[3], const float dV[3]) { float m[3][3], p[3], u[3], cmat[3][3]; world_to_root_v3(data, index, p, c1); mul_fvectorT_fvector(cmat, p, p); sub_m3_m3m3(m, I, cmat); copy_m3_m3(data->S[index].m, m); // mul_m3_m3m3(data->S[index].m, data->S[index].m, m); world_to_root_v3(data, index, u, dV); add_v3_v3(data->z[index], u); } void BPH_mass_spring_clear_forces(Implicit_Data *data) { int numverts = data->M[0].vcount; zero_lfvector(data->F, numverts); init_bfmatrix(data->dFdX, ZERO); init_bfmatrix(data->dFdV, ZERO); data->num_blocks = 0; } void BPH_mass_spring_force_reference_frame(Implicit_Data *data, int index, const float acceleration[3], const float omega[3], const float domega_dt[3], float mass) { # ifdef CLOTH_ROOT_FRAME float acc[3], w[3], dwdt[3]; float f[3], dfdx[3][3], dfdv[3][3]; float euler[3], coriolis[3], centrifugal[3], rotvel[3]; float deuler[3][3], dcoriolis[3][3], dcentrifugal[3][3], drotvel[3][3]; world_to_root_v3(data, index, acc, acceleration); world_to_root_v3(data, index, w, omega); world_to_root_v3(data, index, dwdt, domega_dt); cross_v3_v3v3(euler, dwdt, data->X[index]); cross_v3_v3v3(coriolis, w, data->V[index]); mul_v3_fl(coriolis, 2.0f); cross_v3_v3v3(rotvel, w, data->X[index]); cross_v3_v3v3(centrifugal, w, rotvel); sub_v3_v3v3(f, acc, euler); sub_v3_v3(f, coriolis); sub_v3_v3(f, centrifugal); mul_v3_fl(f, mass); /* F = m * a */ cross_v3_identity(deuler, dwdt); cross_v3_identity(dcoriolis, w); mul_m3_fl(dcoriolis, 2.0f); cross_v3_identity(drotvel, w); cross_m3_v3m3(dcentrifugal, w, drotvel); add_m3_m3m3(dfdx, deuler, dcentrifugal); negate_m3(dfdx); mul_m3_fl(dfdx, mass); copy_m3_m3(dfdv, dcoriolis); negate_m3(dfdv); mul_m3_fl(dfdv, mass); add_v3_v3(data->F[index], f); 
add_m3_m3m3(data->dFdX[index].m, data->dFdX[index].m, dfdx); add_m3_m3m3(data->dFdV[index].m, data->dFdV[index].m, dfdv); # else (void)data; (void)index; (void)acceleration; (void)omega; (void)domega_dt; # endif } void BPH_mass_spring_force_gravity(Implicit_Data *data, int index, float mass, const float g[3]) { /* force = mass * acceleration (in this case: gravity) */ float f[3]; world_to_root_v3(data, index, f, g); mul_v3_fl(f, mass); add_v3_v3(data->F[index], f); } void BPH_mass_spring_force_drag(Implicit_Data *data, float drag) { int i, numverts = data->M[0].vcount; for (i = 0; i < numverts; i++) { float tmp[3][3]; /* NB: uses root space velocity, no need to transform */ madd_v3_v3fl(data->F[i], data->V[i], -drag); copy_m3_m3(tmp, I); mul_m3_fl(tmp, -drag); add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, tmp); } } void BPH_mass_spring_force_extern( struct Implicit_Data *data, int i, const float f[3], float dfdx[3][3], float dfdv[3][3]) { float tf[3], tdfdx[3][3], tdfdv[3][3]; world_to_root_v3(data, i, tf, f); world_to_root_m3(data, i, tdfdx, dfdx); world_to_root_m3(data, i, tdfdv, dfdv); add_v3_v3(data->F[i], tf); add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, tdfdx); add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, tdfdv); } static float calc_nor_area_tri(float nor[3], const float v1[3], const float v2[3], const float v3[3]) { float n1[3], n2[3]; sub_v3_v3v3(n1, v1, v2); sub_v3_v3v3(n2, v2, v3); cross_v3_v3v3(nor, n1, n2); return normalize_v3(nor); } /* XXX does not support force jacobians yet, since the effector system does not provide them either */ void BPH_mass_spring_force_face_wind( Implicit_Data *data, int v1, int v2, int v3, const float (*winvec)[3]) { const float effector_scale = 0.02f; float win[3], nor[3], area; float factor; /* calculate face normal and area */ area = calc_nor_area_tri(nor, data->X[v1], data->X[v2], data->X[v3]); /* The force is calculated and split up evenly for each of the three face verts */ factor = effector_scale * area / 3.0f; 
world_to_root_v3(data, v1, win, winvec[v1]); madd_v3_v3fl(data->F[v1], nor, factor * dot_v3v3(win, nor)); world_to_root_v3(data, v2, win, winvec[v2]); madd_v3_v3fl(data->F[v2], nor, factor * dot_v3v3(win, nor)); world_to_root_v3(data, v3, win, winvec[v3]); madd_v3_v3fl(data->F[v3], nor, factor * dot_v3v3(win, nor)); } float BPH_tri_tetra_volume_signed_6x(Implicit_Data *data, int v1, int v2, int v3) { /* The result will be 6x the volume */ return volume_tri_tetrahedron_signed_v3_6x(data->X[v1], data->X[v2], data->X[v3]); } void BPH_mass_spring_force_pressure( Implicit_Data *data, int v1, int v2, int v3, float pressure_difference) { float nor[3], area; float factor; /* calculate face normal and area */ area = calc_nor_area_tri(nor, data->X[v1], data->X[v2], data->X[v3]); /* The force is calculated and split up evenly for each of the three face verts */ factor = pressure_difference * area / 3.0f; /* add pressure to each of the face verts */ madd_v3_v3fl(data->F[v1], nor, factor); madd_v3_v3fl(data->F[v2], nor, factor); madd_v3_v3fl(data->F[v3], nor, factor); } static void edge_wind_vertex(const float dir[3], float length, float radius, const float wind[3], float f[3], float UNUSED(dfdx[3][3]), float UNUSED(dfdv[3][3])) { const float density = 0.01f; /* XXX arbitrary value, corresponds to effect of air density */ float cos_alpha, sin_alpha, cross_section; float windlen = len_v3(wind); if (windlen == 0.0f) { zero_v3(f); return; } /* angle of wind direction to edge */ cos_alpha = dot_v3v3(wind, dir) / windlen; sin_alpha = sqrtf(1.0f - cos_alpha * cos_alpha); cross_section = radius * ((float)M_PI * radius * sin_alpha + length * cos_alpha); mul_v3_v3fl(f, wind, density * cross_section); } void BPH_mass_spring_force_edge_wind( Implicit_Data *data, int v1, int v2, float radius1, float radius2, const float (*winvec)[3]) { float win[3], dir[3], length; float f[3], dfdx[3][3], dfdv[3][3]; sub_v3_v3v3(dir, data->X[v1], data->X[v2]); length = normalize_v3(dir); 
world_to_root_v3(data, v1, win, winvec[v1]); edge_wind_vertex(dir, length, radius1, win, f, dfdx, dfdv); add_v3_v3(data->F[v1], f); world_to_root_v3(data, v2, win, winvec[v2]); edge_wind_vertex(dir, length, radius2, win, f, dfdx, dfdv); add_v3_v3(data->F[v2], f); } void BPH_mass_spring_force_vertex_wind(Implicit_Data *data, int v, float UNUSED(radius), const float (*winvec)[3]) { const float density = 0.01f; /* XXX arbitrary value, corresponds to effect of air density */ float wind[3]; float f[3]; world_to_root_v3(data, v, wind, winvec[v]); mul_v3_v3fl(f, wind, density); add_v3_v3(data->F[v], f); } BLI_INLINE void dfdx_spring(float to[3][3], const float dir[3], float length, float L, float k) { // dir is unit length direction, rest is spring's restlength, k is spring constant. // return ( (I-outerprod(dir, dir))*Min(1.0f, rest/length) - I) * -k; outerproduct(to, dir, dir); sub_m3_m3m3(to, I, to); mul_m3_fl(to, (L / length)); sub_m3_m3m3(to, to, I); mul_m3_fl(to, k); } /* unused */ # if 0 BLI_INLINE void dfdx_damp(float to[3][3], const float dir[3], float length, const float vel[3], float rest, float damping) { // inner spring damping vel is the relative velocity of the endpoints. 
// return (I-outerprod(dir, dir)) * (-damping * -(dot(dir, vel)/Max(length, rest))); mul_fvectorT_fvector(to, dir, dir); sub_fmatrix_fmatrix(to, I, to); mul_fmatrix_S(to, (-damping * -(dot_v3v3(dir, vel) / MAX2(length, rest)))); } # endif BLI_INLINE void dfdv_damp(float to[3][3], const float dir[3], float damping) { // derivative of force wrt velocity outerproduct(to, dir, dir); mul_m3_fl(to, -damping); } BLI_INLINE float fb(float length, float L) { float x = length / L; float xx = x * x; float xxx = xx * x; float xxxx = xxx * x; return (-11.541f * xxxx + 34.193f * xxx - 39.083f * xx + 23.116f * x - 9.713f); } BLI_INLINE float fbderiv(float length, float L) { float x = length / L; float xx = x * x; float xxx = xx * x; return (-46.164f * xxx + 102.579f * xx - 78.166f * x + 23.116f); } BLI_INLINE float fbstar(float length, float L, float kb, float cb) { float tempfb_fl = kb * fb(length, L); float fbstar_fl = cb * (length - L); if (tempfb_fl < fbstar_fl) { return fbstar_fl; } else { return tempfb_fl; } } // function to calculae bending spring force (taken from Choi & Co) BLI_INLINE float fbstar_jacobi(float length, float L, float kb, float cb) { float tempfb_fl = kb * fb(length, L); float fbstar_fl = cb * (length - L); if (tempfb_fl < fbstar_fl) { return -cb; } else { return -kb * fbderiv(length, L); } } /* calculate elonglation */ BLI_INLINE bool spring_length(Implicit_Data *data, int i, int j, float r_extent[3], float r_dir[3], float *r_length, float r_vel[3]) { sub_v3_v3v3(r_extent, data->X[j], data->X[i]); sub_v3_v3v3(r_vel, data->V[j], data->V[i]); *r_length = len_v3(r_extent); if (*r_length > ALMOST_ZERO) { # if 0 if (length > L) { if ((clmd->sim_parms->flags & CSIMSETT_FLAG_TEARING_ENABLED) && (((length - L) * 100.0f / L) > clmd->sim_parms->maxspringlen)) { // cut spring! 
s->flags |= CSPRING_FLAG_DEACTIVATE; return false; } } # endif mul_v3_v3fl(r_dir, r_extent, 1.0f / (*r_length)); } else { zero_v3(r_dir); } return true; } BLI_INLINE void apply_spring( Implicit_Data *data, int i, int j, const float f[3], float dfdx[3][3], float dfdv[3][3]) { int block_ij = BPH_mass_spring_add_block(data, i, j); add_v3_v3(data->F[i], f); sub_v3_v3(data->F[j], f); add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfdx); add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfdx); sub_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfdx); add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, dfdv); add_m3_m3m3(data->dFdV[j].m, data->dFdV[j].m, dfdv); sub_m3_m3m3(data->dFdV[block_ij].m, data->dFdV[block_ij].m, dfdv); } bool BPH_mass_spring_force_spring_linear(Implicit_Data *data, int i, int j, float restlen, float stiffness_tension, float damping_tension, float stiffness_compression, float damping_compression, bool resist_compress, bool new_compress, float clamp_force) { float extent[3], length, dir[3], vel[3]; float f[3], dfdx[3][3], dfdv[3][3]; float damping = 0; // calculate elonglation spring_length(data, i, j, extent, dir, &length, vel); /* This code computes not only the force, but also its derivative. * Zero derivative effectively disables the spring for the implicit solver. * Thus length > restlen makes cloth unconstrained at the start of simulation. */ if ((length >= restlen && length > 0) || resist_compress) { float stretch_force; damping = damping_tension; stretch_force = stiffness_tension * (length - restlen); if (clamp_force > 0.0f && stretch_force > clamp_force) { stretch_force = clamp_force; } mul_v3_v3fl(f, dir, stretch_force); dfdx_spring(dfdx, dir, length, restlen, stiffness_tension); } else if (new_compress) { /* This is based on the Choi and Ko bending model, * which works surprisingly well for compression. 
*/ float kb = stiffness_compression; float cb = kb; /* cb equal to kb seems to work, but a factor can be added if necessary */ damping = damping_compression; mul_v3_v3fl(f, dir, fbstar(length, restlen, kb, cb)); outerproduct(dfdx, dir, dir); mul_m3_fl(dfdx, fbstar_jacobi(length, restlen, kb, cb)); } else { return false; } madd_v3_v3fl(f, dir, damping * dot_v3v3(vel, dir)); dfdv_damp(dfdv, dir, damping); apply_spring(data, i, j, f, dfdx, dfdv); return true; } /* See "Stable but Responsive Cloth" (Choi, Ko 2005) */ bool BPH_mass_spring_force_spring_bending( Implicit_Data *data, int i, int j, float restlen, float kb, float cb) { float extent[3], length, dir[3], vel[3]; // calculate elonglation spring_length(data, i, j, extent, dir, &length, vel); if (length < restlen) { float f[3], dfdx[3][3], dfdv[3][3]; mul_v3_v3fl(f, dir, fbstar(length, restlen, kb, cb)); outerproduct(dfdx, dir, dir); mul_m3_fl(dfdx, fbstar_jacobi(length, restlen, kb, cb)); /* XXX damping not supported */ zero_m3(dfdv); apply_spring(data, i, j, f, dfdx, dfdv); return true; } else { return false; } } BLI_INLINE void poly_avg(lfVector *data, int *inds, int len, float r_avg[3]) { float fact = 1.0f / (float)len; zero_v3(r_avg); for (int i = 0; i < len; i++) { madd_v3_v3fl(r_avg, data[inds[i]], fact); } } BLI_INLINE void poly_norm(lfVector *data, int i, int j, int *inds, int len, float r_dir[3]) { float mid[3]; poly_avg(data, inds, len, mid); normal_tri_v3(r_dir, data[i], data[j], mid); } BLI_INLINE void edge_avg(lfVector *data, int i, int j, float r_avg[3]) { r_avg[0] = (data[i][0] + data[j][0]) * 0.5f; r_avg[1] = (data[i][1] + data[j][1]) * 0.5f; r_avg[2] = (data[i][2] + data[j][2]) * 0.5f; } BLI_INLINE void edge_norm(lfVector *data, int i, int j, float r_dir[3]) { sub_v3_v3v3(r_dir, data[i], data[j]); normalize_v3(r_dir); } BLI_INLINE float bend_angle(float dir_a[3], float dir_b[3], float dir_e[3]) { float cos, sin; float tmp[3]; cos = dot_v3v3(dir_a, dir_b); cross_v3_v3v3(tmp, dir_a, dir_b); sin = 
dot_v3v3(tmp, dir_e); return atan2f(sin, cos); } BLI_INLINE void spring_angle(Implicit_Data *data, int i, int j, int *i_a, int *i_b, int len_a, int len_b, float r_dir_a[3], float r_dir_b[3], float *r_angle, float r_vel_a[3], float r_vel_b[3]) { float dir_e[3], vel_e[3]; poly_norm(data->X, j, i, i_a, len_a, r_dir_a); poly_norm(data->X, i, j, i_b, len_b, r_dir_b); edge_norm(data->X, i, j, dir_e); *r_angle = bend_angle(r_dir_a, r_dir_b, dir_e); poly_avg(data->V, i_a, len_a, r_vel_a); poly_avg(data->V, i_b, len_b, r_vel_b); edge_avg(data->V, i, j, vel_e); sub_v3_v3(r_vel_a, vel_e); sub_v3_v3(r_vel_b, vel_e); } /* Angular springs roughly based on the bending model proposed by Baraff and Witkin in "Large Steps * in Cloth Simulation". */ bool BPH_mass_spring_force_spring_angular(Implicit_Data *data, int i, int j, int *i_a, int *i_b, int len_a, int len_b, float restang, float stiffness, float damping) { float angle, dir_a[3], dir_b[3], vel_a[3], vel_b[3]; float f_a[3], f_b[3], f_e[3]; float force; int x; spring_angle(data, i, j, i_a, i_b, len_a, len_b, dir_a, dir_b, &angle, vel_a, vel_b); /* spring force */ force = stiffness * (angle - restang); /* damping force */ force += -damping * (dot_v3v3(vel_a, dir_a) + dot_v3v3(vel_b, dir_b)); mul_v3_v3fl(f_a, dir_a, force / len_a); mul_v3_v3fl(f_b, dir_b, force / len_b); for (x = 0; x < len_a; x++) { add_v3_v3(data->F[i_a[x]], f_a); } for (x = 0; x < len_b; x++) { add_v3_v3(data->F[i_b[x]], f_b); } mul_v3_v3fl(f_a, dir_a, force * 0.5f); mul_v3_v3fl(f_b, dir_b, force * 0.5f); add_v3_v3v3(f_e, f_a, f_b); sub_v3_v3(data->F[i], f_e); sub_v3_v3(data->F[j], f_e); return true; } /* Jacobian of a direction vector. * Basically the part of the differential orthogonal to the direction, * inversely proportional to the length of the edge. 
* * dD_ij/dx_i = -dD_ij/dx_j = (D_ij * D_ij^T - I) / len_ij */ BLI_INLINE void spring_grad_dir( Implicit_Data *data, int i, int j, float edge[3], float dir[3], float grad_dir[3][3]) { float length; sub_v3_v3v3(edge, data->X[j], data->X[i]); length = normalize_v3_v3(dir, edge); if (length > ALMOST_ZERO) { outerproduct(grad_dir, dir, dir); sub_m3_m3m3(grad_dir, I, grad_dir); mul_m3_fl(grad_dir, 1.0f / length); } else { zero_m3(grad_dir); } } BLI_INLINE void spring_hairbend_forces(Implicit_Data *data, int i, int j, int k, const float goal[3], float stiffness, float damping, int q, const float dx[3], const float dv[3], float r_f[3]) { float edge_ij[3], dir_ij[3]; float edge_jk[3], dir_jk[3]; float vel_ij[3], vel_jk[3], vel_ortho[3]; float f_bend[3], f_damp[3]; float fk[3]; float dist[3]; zero_v3(fk); sub_v3_v3v3(edge_ij, data->X[j], data->X[i]); if (q == i) { sub_v3_v3(edge_ij, dx); } if (q == j) { add_v3_v3(edge_ij, dx); } normalize_v3_v3(dir_ij, edge_ij); sub_v3_v3v3(edge_jk, data->X[k], data->X[j]); if (q == j) { sub_v3_v3(edge_jk, dx); } if (q == k) { add_v3_v3(edge_jk, dx); } normalize_v3_v3(dir_jk, edge_jk); sub_v3_v3v3(vel_ij, data->V[j], data->V[i]); if (q == i) { sub_v3_v3(vel_ij, dv); } if (q == j) { add_v3_v3(vel_ij, dv); } sub_v3_v3v3(vel_jk, data->V[k], data->V[j]); if (q == j) { sub_v3_v3(vel_jk, dv); } if (q == k) { add_v3_v3(vel_jk, dv); } /* bending force */ sub_v3_v3v3(dist, goal, edge_jk); mul_v3_v3fl(f_bend, dist, stiffness); add_v3_v3(fk, f_bend); /* damping force */ madd_v3_v3v3fl(vel_ortho, vel_jk, dir_jk, -dot_v3v3(vel_jk, dir_jk)); mul_v3_v3fl(f_damp, vel_ortho, damping); sub_v3_v3(fk, f_damp); copy_v3_v3(r_f, fk); } /* Finite Differences method for estimating the jacobian of the force */ BLI_INLINE void spring_hairbend_estimate_dfdx(Implicit_Data *data, int i, int j, int k, const float goal[3], float stiffness, float damping, int q, float dfdx[3][3]) { const float delta = 0.00001f; // TODO find a good heuristic for this float dvec_null[3][3], 
dvec_pos[3][3], dvec_neg[3][3]; float f[3]; int a, b; zero_m3(dvec_null); unit_m3(dvec_pos); mul_m3_fl(dvec_pos, delta * 0.5f); copy_m3_m3(dvec_neg, dvec_pos); negate_m3(dvec_neg); /* XXX TODO offset targets to account for position dependency */ for (a = 0; a < 3; a++) { spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_pos[a], dvec_null[a], f); copy_v3_v3(dfdx[a], f); spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_neg[a], dvec_null[a], f); sub_v3_v3(dfdx[a], f); for (b = 0; b < 3; b++) { dfdx[a][b] /= delta; } } } /* Finite Differences method for estimating the jacobian of the force */ BLI_INLINE void spring_hairbend_estimate_dfdv(Implicit_Data *data, int i, int j, int k, const float goal[3], float stiffness, float damping, int q, float dfdv[3][3]) { const float delta = 0.00001f; // TODO find a good heuristic for this float dvec_null[3][3], dvec_pos[3][3], dvec_neg[3][3]; float f[3]; int a, b; zero_m3(dvec_null); unit_m3(dvec_pos); mul_m3_fl(dvec_pos, delta * 0.5f); copy_m3_m3(dvec_neg, dvec_pos); negate_m3(dvec_neg); /* XXX TODO offset targets to account for position dependency */ for (a = 0; a < 3; a++) { spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_null[a], dvec_pos[a], f); copy_v3_v3(dfdv[a], f); spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_null[a], dvec_neg[a], f); sub_v3_v3(dfdv[a], f); for (b = 0; b < 3; b++) { dfdv[a][b] /= delta; } } } /* Angular spring that pulls the vertex toward the local target * See "Artistic Simulation of Curly Hair" (Pixar technical memo #12-03a) */ bool BPH_mass_spring_force_spring_bending_hair(Implicit_Data *data, int i, int j, int k, const float target[3], float stiffness, float damping) { float goal[3]; float fj[3], fk[3]; float dfj_dxi[3][3], dfj_dxj[3][3], dfk_dxi[3][3], dfk_dxj[3][3], dfk_dxk[3][3]; float dfj_dvi[3][3], dfj_dvj[3][3], dfk_dvi[3][3], dfk_dvj[3][3], dfk_dvk[3][3]; const float vecnull[3] = {0.0f, 0.0f, 
0.0f}; int block_ij = BPH_mass_spring_add_block(data, i, j); int block_jk = BPH_mass_spring_add_block(data, j, k); int block_ik = BPH_mass_spring_add_block(data, i, k); world_to_root_v3(data, j, goal, target); spring_hairbend_forces(data, i, j, k, goal, stiffness, damping, k, vecnull, vecnull, fk); negate_v3_v3(fj, fk); /* counterforce */ spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, i, dfk_dxi); spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, j, dfk_dxj); spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, k, dfk_dxk); copy_m3_m3(dfj_dxi, dfk_dxi); negate_m3(dfj_dxi); copy_m3_m3(dfj_dxj, dfk_dxj); negate_m3(dfj_dxj); spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, i, dfk_dvi); spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, j, dfk_dvj); spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, k, dfk_dvk); copy_m3_m3(dfj_dvi, dfk_dvi); negate_m3(dfj_dvi); copy_m3_m3(dfj_dvj, dfk_dvj); negate_m3(dfj_dvj); /* add forces and jacobians to the solver data */ add_v3_v3(data->F[j], fj); add_v3_v3(data->F[k], fk); add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfj_dxj); add_m3_m3m3(data->dFdX[k].m, data->dFdX[k].m, dfk_dxk); add_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfj_dxi); add_m3_m3m3(data->dFdX[block_jk].m, data->dFdX[block_jk].m, dfk_dxj); add_m3_m3m3(data->dFdX[block_ik].m, data->dFdX[block_ik].m, dfk_dxi); add_m3_m3m3(data->dFdV[j].m, data->dFdV[j].m, dfj_dvj); add_m3_m3m3(data->dFdV[k].m, data->dFdV[k].m, dfk_dvk); add_m3_m3m3(data->dFdV[block_ij].m, data->dFdV[block_ij].m, dfj_dvi); add_m3_m3m3(data->dFdV[block_jk].m, data->dFdV[block_jk].m, dfk_dvj); add_m3_m3m3(data->dFdV[block_ik].m, data->dFdV[block_ik].m, dfk_dvi); /* XXX analytical calculation of derivatives below is incorrect. * This proved to be difficult, but for now just using the finite difference method for * estimating the jacobians should be sufficient. 
*/ # if 0 float edge_ij[3], dir_ij[3], grad_dir_ij[3][3]; float edge_jk[3], dir_jk[3], grad_dir_jk[3][3]; float dist[3], vel_jk[3], vel_jk_ortho[3], projvel[3]; float target[3]; float tmp[3][3]; float fi[3], fj[3], fk[3]; float dfi_dxi[3][3], dfj_dxi[3][3], dfj_dxj[3][3], dfk_dxi[3][3], dfk_dxj[3][3], dfk_dxk[3][3]; float dfdvi[3][3]; // TESTING damping = 0.0f; zero_v3(fi); zero_v3(fj); zero_v3(fk); zero_m3(dfi_dxi); zero_m3(dfj_dxi); zero_m3(dfk_dxi); zero_m3(dfk_dxj); zero_m3(dfk_dxk); /* jacobian of direction vectors */ spring_grad_dir(data, i, j, edge_ij, dir_ij, grad_dir_ij); spring_grad_dir(data, j, k, edge_jk, dir_jk, grad_dir_jk); sub_v3_v3v3(vel_jk, data->V[k], data->V[j]); /* bending force */ mul_v3_v3fl(target, dir_ij, restlen); sub_v3_v3v3(dist, target, edge_jk); mul_v3_v3fl(fk, dist, stiffness); /* damping force */ madd_v3_v3v3fl(vel_jk_ortho, vel_jk, dir_jk, -dot_v3v3(vel_jk, dir_jk)); madd_v3_v3fl(fk, vel_jk_ortho, damping); /* XXX this only holds true as long as we assume straight rest shape! * eventually will become a bit more involved since the opposite segment * gets its own target, under condition of having equal torque on both sides. 
*/ copy_v3_v3(fi, fk); /* counterforce on the middle point */ sub_v3_v3(fj, fi); sub_v3_v3(fj, fk); /* === derivatives === */ madd_m3_m3fl(dfk_dxi, grad_dir_ij, stiffness * restlen); madd_m3_m3fl(dfk_dxj, grad_dir_ij, -stiffness * restlen); madd_m3_m3fl(dfk_dxj, I, stiffness); madd_m3_m3fl(dfk_dxk, I, -stiffness); copy_m3_m3(dfi_dxi, dfk_dxk); negate_m3(dfi_dxi); /* dfj_dfi == dfi_dfj due to symmetry, * dfi_dfj == dfk_dfj due to fi == fk * XXX see comment above on future bent rest shapes */ copy_m3_m3(dfj_dxi, dfk_dxj); /* dfj_dxj == -(dfi_dxj + dfk_dxj) due to fj == -(fi + fk) */ sub_m3_m3m3(dfj_dxj, dfj_dxj, dfj_dxi); sub_m3_m3m3(dfj_dxj, dfj_dxj, dfk_dxj); /* add forces and jacobians to the solver data */ add_v3_v3(data->F[i], fi); add_v3_v3(data->F[j], fj); add_v3_v3(data->F[k], fk); add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfi_dxi); add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfj_dxj); add_m3_m3m3(data->dFdX[k].m, data->dFdX[k].m, dfk_dxk); add_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfj_dxi); add_m3_m3m3(data->dFdX[block_jk].m, data->dFdX[block_jk].m, dfk_dxj); add_m3_m3m3(data->dFdX[block_ik].m, data->dFdX[block_ik].m, dfk_dxi); # endif return true; } bool BPH_mass_spring_force_spring_goal(Implicit_Data *data, int i, const float goal_x[3], const float goal_v[3], float stiffness, float damping) { float root_goal_x[3], root_goal_v[3], extent[3], length, dir[3], vel[3]; float f[3], dfdx[3][3], dfdv[3][3]; /* goal is in world space */ world_to_root_v3(data, i, root_goal_x, goal_x); world_to_root_v3(data, i, root_goal_v, goal_v); sub_v3_v3v3(extent, root_goal_x, data->X[i]); sub_v3_v3v3(vel, root_goal_v, data->V[i]); length = normalize_v3_v3(dir, extent); if (length > ALMOST_ZERO) { mul_v3_v3fl(f, dir, stiffness * length); // Ascher & Boxman, p.21: Damping only during elonglation // something wrong with it... 
madd_v3_v3fl(f, dir, damping * dot_v3v3(vel, dir)); dfdx_spring(dfdx, dir, length, 0.0f, stiffness); dfdv_damp(dfdv, dir, damping); add_v3_v3(data->F[i], f); add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfdx); add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, dfdv); return true; } else { return false; } } #endif /* IMPLICIT_SOLVER_BLENDER */
lsh_index.h
/*********************************************************************** * Software License Agreement (BSD License) * * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. * * THE BSD LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*************************************************************************/ /*********************************************************************** * Author: Vincent Rabaud *************************************************************************/ #ifndef FLANN_LSH_INDEX_H_ #define FLANN_LSH_INDEX_H_ #include <algorithm> #include <cassert> #include <cstring> #include <map> #include <vector> #include "flann/general.h" #include "flann/algorithms/nn_index.h" #include "flann/util/matrix.h" #include "flann/util/result_set.h" #include "flann/util/heap.h" #include "flann/util/lsh_table.h" #include "flann/util/allocator.h" #include "flann/util/random.h" #include "flann/util/saving.h" namespace flann { struct LshIndexParams : public IndexParams { LshIndexParams(unsigned int table_number = 12, unsigned int key_size = 20, unsigned int multi_probe_level = 2) { (* this)["algorithm"] = FLANN_INDEX_LSH; // The number of hash tables to use (*this)["table_number"] = table_number; // The length of the key in the hash tables (*this)["key_size"] = key_size; // Number of levels to use in multi-probe (0 for standard LSH) (*this)["multi_probe_level"] = multi_probe_level; } }; /** * Locality-sensitive hashing index * * Contains the tables and other information for indexing a set of points * for nearest-neighbor matching. 
*/ template<typename Distance> class LshIndex : public NNIndex<Distance> { public: typedef typename Distance::ElementType ElementType; typedef typename Distance::ResultType DistanceType; typedef NNIndex<Distance> BaseClass; /** Constructor * @param params parameters passed to the LSH algorithm * @param d the distance used */ LshIndex(const IndexParams& params = LshIndexParams(), Distance d = Distance()) : BaseClass(params, d) { table_number_ = get_param<unsigned int>(index_params_,"table_number",12); key_size_ = get_param<unsigned int>(index_params_,"key_size",20); multi_probe_level_ = get_param<unsigned int>(index_params_,"multi_probe_level",2); fill_xor_mask(0, key_size_, multi_probe_level_, xor_masks_); } /** Constructor * @param input_data dataset with the input features * @param params parameters passed to the LSH algorithm * @param d the distance used */ LshIndex(const Matrix<ElementType>& input_data, const IndexParams& params = LshIndexParams(), Distance d = Distance()) : BaseClass(params, d) { table_number_ = get_param<unsigned int>(index_params_,"table_number",12); key_size_ = get_param<unsigned int>(index_params_,"key_size",20); multi_probe_level_ = get_param<unsigned int>(index_params_,"multi_probe_level",2); fill_xor_mask(0, key_size_, multi_probe_level_, xor_masks_); setDataset(input_data); } LshIndex(const LshIndex& other) : BaseClass(other), tables_(other.tables_), table_number_(other.table_number_), key_size_(other.key_size_), multi_probe_level_(other.multi_probe_level_), xor_masks_(other.xor_masks_) { } LshIndex& operator=(LshIndex other) { this->swap(other); return *this; } virtual ~LshIndex() { freeIndex(); } BaseClass* clone() const { return new LshIndex(*this); } using BaseClass::buildIndex; void addPoints(const Matrix<ElementType>& points, float rebuild_threshold = 2) { assert(points.cols==veclen_); size_t old_size = size_; extendDataset(points); if (rebuild_threshold>1 && size_at_build_*rebuild_threshold<size_) { buildIndex(); } else { for 
(unsigned int i = 0; i < table_number_; ++i) { lsh::LshTable<ElementType>& table = tables_[i]; for (size_t i=old_size;i<size_;++i) { table.add(i, points_[i]); } } } } flann_algorithm_t getType() const { return FLANN_INDEX_LSH; } template<typename Archive> void serialize(Archive& ar) { ar.setObject(this); ar & *static_cast<NNIndex<Distance>*>(this); ar & table_number_; ar & key_size_; ar & multi_probe_level_; ar & xor_masks_; ar & tables_; if (Archive::is_loading::value) { index_params_["algorithm"] = getType(); index_params_["table_number"] = table_number_; index_params_["key_size"] = key_size_; index_params_["multi_probe_level"] = multi_probe_level_; } } void saveIndex(FILE* stream) { serialization::SaveArchive sa(stream); sa & *this; } void loadIndex(FILE* stream) { serialization::LoadArchive la(stream); la & *this; } /** * Computes the index memory usage * Returns: memory used by the index */ int usedMemory() const { return size_ * sizeof(int); } /** * \brief Perform k-nearest neighbor search * \param[in] queries The query points for which to find the nearest neighbors * \param[out] indices The indices of the nearest neighbors found * \param[out] dists Distances to the nearest neighbors found * \param[in] knn Number of nearest neighbors to return * \param[in] params Search parameters */ int knnSearch(const Matrix<ElementType>& queries, Matrix<size_t>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params) const { assert(queries.cols == veclen_); assert(indices.rows >= queries.rows); assert(dists.rows >= queries.rows); assert(indices.cols >= knn); assert(dists.cols >= knn); int count = 0; if (params.use_heap==FLANN_True) { #pragma omp parallel num_threads(params.cores) { KNNUniqueResultSet<DistanceType> resultSet(knn); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = std::min(resultSet.size(), knn); 
resultSet.copy(indices[i], dists[i], n, params.sorted); indices_to_ids(indices[i], indices[i], n); count += n; } } } else { #pragma omp parallel num_threads(params.cores) { KNNResultSet<DistanceType> resultSet(knn); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = std::min(resultSet.size(), knn); resultSet.copy(indices[i], dists[i], n, params.sorted); indices_to_ids(indices[i], indices[i], n); count += n; } } } return count; } /** * \brief Perform k-nearest neighbor search * \param[in] queries The query points for which to find the nearest neighbors * \param[out] indices The indices of the nearest neighbors found * \param[out] dists Distances to the nearest neighbors found * \param[in] knn Number of nearest neighbors to return * \param[in] params Search parameters */ int knnSearch(const Matrix<ElementType>& queries, std::vector< std::vector<size_t> >& indices, std::vector<std::vector<DistanceType> >& dists, size_t knn, const SearchParams& params) const { assert(queries.cols == veclen_); if (indices.size() < queries.rows ) indices.resize(queries.rows); if (dists.size() < queries.rows ) dists.resize(queries.rows); int count = 0; if (params.use_heap==FLANN_True) { #pragma omp parallel num_threads(params.cores) { KNNUniqueResultSet<DistanceType> resultSet(knn); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = std::min(resultSet.size(), knn); indices[i].resize(n); dists[i].resize(n); if (n > 0) { resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted); indices_to_ids(&indices[i][0], &indices[i][0], n); } count += n; } } } else { #pragma omp parallel num_threads(params.cores) { KNNResultSet<DistanceType> resultSet(knn); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { 
resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = std::min(resultSet.size(), knn); indices[i].resize(n); dists[i].resize(n); if (n > 0) { resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted); indices_to_ids(&indices[i][0], &indices[i][0], n); } count += n; } } } return count; } /** * Find set of nearest neighbors to vec. Their indices are stored inside * the result object. * * Params: * result = the result object in which the indices of the nearest-neighbors are stored * vec = the vector for which to search the nearest neighbors * maxCheck = the maximum number of restarts (in a best-bin-first manner) */ int findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& /*searchParams*/) const { getNeighbors(vec, result); return 0; } protected: /** * Builds the index */ void buildIndexImpl() { tables_.resize(table_number_); std::vector<std::pair<size_t,ElementType*> > features; features.reserve(points_.size()); for (size_t i=0;i<points_.size();++i) { features.push_back(std::make_pair(i, points_[i])); } for (unsigned int i = 0; i < table_number_; ++i) { lsh::LshTable<ElementType>& table = tables_[i]; table = lsh::LshTable<ElementType>(veclen_, key_size_); // Add the features to the table table.add(features); } } void freeIndex() { /* nothing to do here */ } private: /** Defines the comparator on score and index */ typedef std::pair<float, unsigned int> ScoreIndexPair; struct SortScoreIndexPairOnSecond { bool operator()(const ScoreIndexPair& left, const ScoreIndexPair& right) const { return left.second < right.second; } }; /** Fills the different xor masks to use when getting the neighbors in multi-probe LSH * @param key the key we build neighbors from * @param lowest_index the lowest index of the bit set * @param level the multi-probe level we are at * @param xor_masks all the xor mask */ void fill_xor_mask(lsh::BucketKey key, int lowest_index, unsigned int level, std::vector<lsh::BucketKey>& xor_masks) 
{ xor_masks.push_back(key); if (level == 0) return; for (int index = lowest_index - 1; index >= 0; --index) { // Create a new key lsh::BucketKey new_key = key | (lsh::BucketKey(1) << index); fill_xor_mask(new_key, index, level - 1, xor_masks); } } /** Performs the approximate nearest-neighbor search. * @param vec the feature to analyze * @param do_radius flag indicating if we check the radius too * @param radius the radius if it is a radius search * @param do_k flag indicating if we limit the number of nn * @param k_nn the number of nearest neighbors * @param checked_average used for debugging */ void getNeighbors(const ElementType* vec, bool do_radius, float radius, bool do_k, unsigned int k_nn, float& checked_average) { static std::vector<ScoreIndexPair> score_index_heap; if (do_k) { unsigned int worst_score = std::numeric_limits<unsigned int>::max(); typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin(); typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end(); for (; table != table_end; ++table) { size_t key = table->getKey(vec); std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin(); std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end(); for (; xor_mask != xor_mask_end; ++xor_mask) { size_t sub_key = key ^ (*xor_mask); const lsh::Bucket* bucket = table->getBucketFromKey(sub_key); if (bucket == 0) continue; // Go over each descriptor index std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin(); std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end(); DistanceType hamming_distance; // Process the rest of the candidates for (; training_index < last_training_index; ++training_index) { if (removed_ && removed_points_.test(*training_index)) continue; hamming_distance = distance_(vec, points_[*training_index].point, veclen_); if (hamming_distance < worst_score) { // Insert the new element 
score_index_heap.push_back(ScoreIndexPair(hamming_distance, training_index)); std::push_heap(score_index_heap.begin(), score_index_heap.end()); if (score_index_heap.size() > (unsigned int)k_nn) { // Remove the highest distance value as we have too many elements std::pop_heap(score_index_heap.begin(), score_index_heap.end()); score_index_heap.pop_back(); // Keep track of the worst score worst_score = score_index_heap.front().first; } } } } } } else { typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin(); typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end(); for (; table != table_end; ++table) { size_t key = table->getKey(vec); std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin(); std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end(); for (; xor_mask != xor_mask_end; ++xor_mask) { size_t sub_key = key ^ (*xor_mask); const lsh::Bucket* bucket = table->getBucketFromKey(sub_key); if (bucket == 0) continue; // Go over each descriptor index std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin(); std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end(); DistanceType hamming_distance; // Process the rest of the candidates for (; training_index < last_training_index; ++training_index) { if (removed_ && removed_points_.test(*training_index)) continue; // Compute the Hamming distance hamming_distance = distance_(vec, points_[*training_index].point, veclen_); if (hamming_distance < radius) score_index_heap.push_back(ScoreIndexPair(hamming_distance, training_index)); } } } } } /** Performs the approximate nearest-neighbor search. 
* This is a slower version than the above as it uses the ResultSet * @param vec the feature to analyze */ void getNeighbors(const ElementType* vec, ResultSet<DistanceType>& result) const { typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin(); typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end(); for (; table != table_end; ++table) { size_t key = table->getKey(vec); std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin(); std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end(); for (; xor_mask != xor_mask_end; ++xor_mask) { size_t sub_key = key ^ (*xor_mask); const lsh::Bucket* bucket = table->getBucketFromKey(sub_key); if (bucket == 0) continue; // Go over each descriptor index std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin(); std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end(); DistanceType hamming_distance; // Process the rest of the candidates for (; training_index < last_training_index; ++training_index) { if (removed_ && removed_points_.test(*training_index)) continue; // Compute the Hamming distance hamming_distance = distance_(vec, points_[*training_index], veclen_); result.addPoint(hamming_distance, *training_index); } } } } void swap(LshIndex& other) { BaseClass::swap(other); std::swap(tables_, other.tables_); std::swap(size_at_build_, other.size_at_build_); std::swap(table_number_, other.table_number_); std::swap(key_size_, other.key_size_); std::swap(multi_probe_level_, other.multi_probe_level_); std::swap(xor_masks_, other.xor_masks_); } /** The different hash tables */ std::vector<lsh::LshTable<ElementType> > tables_; /** table number */ unsigned int table_number_; /** key size */ unsigned int key_size_; /** How far should we look for neighbors in multi-probe LSH */ unsigned int multi_probe_level_; /** The XOR masks to apply to a key to get the neighboring buckets */ 
std::vector<lsh::BucketKey> xor_masks_; USING_BASECLASS_SYMBOLS }; } #endif //FLANN_LSH_INDEX_H_
omp_reduce.c
/* * Full CI */ #include <stdlib.h> #include <complex.h> #include "config.h" void NPomp_dsum_reduce_inplace(double **vec, size_t count) { unsigned int nthreads = omp_get_num_threads(); unsigned int thread_id = omp_get_thread_num(); unsigned int bit, thread_src; unsigned int mask = 0; double *dst = vec[thread_id]; double *src; size_t i; #pragma omp barrier for (bit = 0; (1<<bit) < nthreads; bit++) { mask |= 1 << bit; if (!(thread_id & mask)) { thread_src = thread_id | (1<<bit); if (thread_src < nthreads) { src = vec[thread_src]; for (i = 0; i < count; i++) { dst[i] += src[i]; } } } #pragma omp barrier } } void NPomp_dprod_reduce_inplace(double **vec, size_t count) { unsigned int nthreads = omp_get_num_threads(); unsigned int thread_id = omp_get_thread_num(); unsigned int bit, thread_src; unsigned int mask = 0; double *dst = vec[thread_id]; double *src; size_t i; #pragma omp barrier for (bit = 0; (1<<bit) < nthreads; bit++) { mask |= 1 << bit; if (!(thread_id & mask)) { thread_src = thread_id | (1<<bit); if (thread_src < nthreads) { src = vec[thread_src]; for (i = 0; i < count; i++) { dst[i] *= src[i]; } } } #pragma omp barrier } } void NPomp_zsum_reduce_inplace(double complex **vec, size_t count) { unsigned int nthreads = omp_get_num_threads(); unsigned int thread_id = omp_get_thread_num(); unsigned int bit, thread_src; unsigned int mask = 0; double complex *dst = vec[thread_id]; double complex *src; size_t i; #pragma omp barrier for (bit = 0; (1<<bit) < nthreads; bit++) { mask |= 1 << bit; if (!(thread_id & mask)) { thread_src = thread_id | (1<<bit); if (thread_src < nthreads) { src = vec[thread_src]; for (i = 0; i < count; i++) { dst[i] += src[i]; } } } #pragma omp barrier } } void NPomp_zprod_reduce_inplace(double complex **vec, size_t count) { unsigned int nthreads = omp_get_num_threads(); unsigned int thread_id = omp_get_thread_num(); unsigned int bit, thread_src; unsigned int mask = 0; double complex *dst = vec[thread_id]; double complex *src; size_t i; 
#pragma omp barrier for (bit = 0; (1<<bit) < nthreads; bit++) { mask |= 1 << bit; if (!(thread_id & mask)) { thread_src = thread_id | (1<<bit); if (thread_src < nthreads) { src = vec[thread_src]; for (i = 0; i < count; i++) { dst[i] *= src[i]; } } } #pragma omp barrier } } #ifdef _OPENMP int get_omp_threads() { return omp_get_max_threads(); } int set_omp_threads(int n) { omp_set_num_threads(n); return n; } #else // mimic omp_get_max_threads omp_set_num_threads function of libgomp int get_omp_threads() { return 1; } int set_omp_threads(int n) { return 0; } #endif
GB_unop__expm1_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__expm1_fc32_fc32)
// op(A') function: GB (_unop_tran__expm1_fc32_fc32)

// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_cexpm1f (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: complex expm1 (exp(x)-1), single precision
#define GB_OP(z, x) \
    z = GB_cexpm1f (x) ;

// casting (A and C share the same type here, so the cast is an identity copy)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ; \
    Cx [pC] = GB_cexpm1f (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EXPM1 || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies GB_cexpm1f elementwise to the anz entries of Ax, writing into Cx.
// Returns GrB_NO_VALUE when the kernel is compiled out via GB_DISABLE.
GrB_Info GB (_unop_apply__expm1_fc32_fc32)
(
    GxB_FC32_t *Cx,               // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,    // A->b if A is bitmap
    int64_t anz,
    int nthreads                  // # of OpenMP threads for the parallel loop
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every position 0..anz-1 holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_cexpm1f (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_cexpm1f (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body lives in GB_unop_transpose.c and is specialized by
// the GB_* macros defined above.
GrB_Info GB (_unop_tran__expm1_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
kmp_atomic.c
/* * kmp_atomic.c -- ATOMIC implementation routines */ //===----------------------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.txt for details. // //===----------------------------------------------------------------------===// #include "kmp_atomic.h" #include "kmp.h" // TRUE, asm routines prototypes typedef unsigned char uchar; typedef unsigned short ushort; /*! @defgroup ATOMIC_OPS Atomic Operations These functions are used for implementing the many different varieties of atomic operations. The compiler is at liberty to inline atomic operations that are naturally supported by the target architecture. For instance on IA-32 architecture an atomic like this can be inlined @code static int s = 0; #pragma omp atomic s++; @endcode using the single instruction: `lock; incl s` However the runtime does provide entrypoints for these operations to support compilers that choose not to inline them. (For instance, `__kmpc_atomic_fixed4_add` could be used to perform the increment above.) The names of the functions are encoded by using the data type name and the operation name, as in these tables. Data Type | Data type encoding -----------|--------------- int8_t | `fixed1` uint8_t | `fixed1u` int16_t | `fixed2` uint16_t | `fixed2u` int32_t | `fixed4` uint32_t | `fixed4u` int32_t | `fixed8` uint32_t | `fixed8u` float | `float4` double | `float8` float 10 (8087 eighty bit float) | `float10` complex<float> | `cmplx4` complex<double> | `cmplx8` complex<float10> | `cmplx10` <br> Operation | Operation encoding ----------|------------------- + | add - | sub \* | mul / | div & | andb << | shl \>\> | shr \| | orb ^ | xor && | andl \|\| | orl maximum | max minimum | min .eqv. | eqv .neqv. | neqv <br> For non-commutative operations, `_rev` can also be added for the reversed operation. 
For the functions that capture the result, the suffix `_cpt` is added. Update Functions ================ The general form of an atomic function that just performs an update (without a `capture`) @code void __kmpc_atomic_<datatype>_<operation>( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ); @endcode @param ident_t a pointer to source location @param gtid the global thread id @param lhs a pointer to the left operand @param rhs the right operand `capture` functions =================== The capture functions perform an atomic update and return a result, which is either the value before the capture, or that after. They take an additional argument to determine which result is returned. Their general form is therefore @code TYPE __kmpc_atomic_<datatype>_<operation>_cpt( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, int flag ); @endcode @param ident_t a pointer to source location @param gtid the global thread id @param lhs a pointer to the left operand @param rhs the right operand @param flag one if the result is to be captured *after* the operation, zero if captured *before*. The one set of exceptions to this is the `complex<float>` type where the value is not returned, rather an extra argument pointer is passed. They look like @code void __kmpc_atomic_cmplx4_<op>_cpt( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx32 rhs, kmp_cmplx32 * out, int flag ); @endcode Read and Write Operations ========================= The OpenMP<sup>*</sup> standard now supports atomic operations that simply ensure that the value is read or written atomically, with no modification performed. In many cases on IA-32 architecture these operations can be inlined since the architecture guarantees that no tearing occurs on aligned objects accessed with a single memory operation of up to 64 bits in size. 
The general form of the read operations is @code TYPE __kmpc_atomic_<type>_rd ( ident_t *id_ref, int gtid, TYPE * loc ); @endcode For the write operations the form is @code void __kmpc_atomic_<type>_wr ( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ); @endcode Full list of functions ====================== This leads to the generation of 376 atomic functions, as follows. Functons for integers --------------------- There are versions here for integers of size 1,2,4 and 8 bytes both signed and unsigned (where that matters). @code __kmpc_atomic_fixed1_add __kmpc_atomic_fixed1_add_cpt __kmpc_atomic_fixed1_add_fp __kmpc_atomic_fixed1_andb __kmpc_atomic_fixed1_andb_cpt __kmpc_atomic_fixed1_andl __kmpc_atomic_fixed1_andl_cpt __kmpc_atomic_fixed1_div __kmpc_atomic_fixed1_div_cpt __kmpc_atomic_fixed1_div_cpt_rev __kmpc_atomic_fixed1_div_float8 __kmpc_atomic_fixed1_div_fp __kmpc_atomic_fixed1_div_rev __kmpc_atomic_fixed1_eqv __kmpc_atomic_fixed1_eqv_cpt __kmpc_atomic_fixed1_max __kmpc_atomic_fixed1_max_cpt __kmpc_atomic_fixed1_min __kmpc_atomic_fixed1_min_cpt __kmpc_atomic_fixed1_mul __kmpc_atomic_fixed1_mul_cpt __kmpc_atomic_fixed1_mul_float8 __kmpc_atomic_fixed1_mul_fp __kmpc_atomic_fixed1_neqv __kmpc_atomic_fixed1_neqv_cpt __kmpc_atomic_fixed1_orb __kmpc_atomic_fixed1_orb_cpt __kmpc_atomic_fixed1_orl __kmpc_atomic_fixed1_orl_cpt __kmpc_atomic_fixed1_rd __kmpc_atomic_fixed1_shl __kmpc_atomic_fixed1_shl_cpt __kmpc_atomic_fixed1_shl_cpt_rev __kmpc_atomic_fixed1_shl_rev __kmpc_atomic_fixed1_shr __kmpc_atomic_fixed1_shr_cpt __kmpc_atomic_fixed1_shr_cpt_rev __kmpc_atomic_fixed1_shr_rev __kmpc_atomic_fixed1_sub __kmpc_atomic_fixed1_sub_cpt __kmpc_atomic_fixed1_sub_cpt_rev __kmpc_atomic_fixed1_sub_fp __kmpc_atomic_fixed1_sub_rev __kmpc_atomic_fixed1_swp __kmpc_atomic_fixed1_wr __kmpc_atomic_fixed1_xor __kmpc_atomic_fixed1_xor_cpt __kmpc_atomic_fixed1u_add_fp __kmpc_atomic_fixed1u_sub_fp __kmpc_atomic_fixed1u_mul_fp __kmpc_atomic_fixed1u_div __kmpc_atomic_fixed1u_div_cpt 
__kmpc_atomic_fixed1u_div_cpt_rev __kmpc_atomic_fixed1u_div_fp __kmpc_atomic_fixed1u_div_rev __kmpc_atomic_fixed1u_shr __kmpc_atomic_fixed1u_shr_cpt __kmpc_atomic_fixed1u_shr_cpt_rev __kmpc_atomic_fixed1u_shr_rev __kmpc_atomic_fixed2_add __kmpc_atomic_fixed2_add_cpt __kmpc_atomic_fixed2_add_fp __kmpc_atomic_fixed2_andb __kmpc_atomic_fixed2_andb_cpt __kmpc_atomic_fixed2_andl __kmpc_atomic_fixed2_andl_cpt __kmpc_atomic_fixed2_div __kmpc_atomic_fixed2_div_cpt __kmpc_atomic_fixed2_div_cpt_rev __kmpc_atomic_fixed2_div_float8 __kmpc_atomic_fixed2_div_fp __kmpc_atomic_fixed2_div_rev __kmpc_atomic_fixed2_eqv __kmpc_atomic_fixed2_eqv_cpt __kmpc_atomic_fixed2_max __kmpc_atomic_fixed2_max_cpt __kmpc_atomic_fixed2_min __kmpc_atomic_fixed2_min_cpt __kmpc_atomic_fixed2_mul __kmpc_atomic_fixed2_mul_cpt __kmpc_atomic_fixed2_mul_float8 __kmpc_atomic_fixed2_mul_fp __kmpc_atomic_fixed2_neqv __kmpc_atomic_fixed2_neqv_cpt __kmpc_atomic_fixed2_orb __kmpc_atomic_fixed2_orb_cpt __kmpc_atomic_fixed2_orl __kmpc_atomic_fixed2_orl_cpt __kmpc_atomic_fixed2_rd __kmpc_atomic_fixed2_shl __kmpc_atomic_fixed2_shl_cpt __kmpc_atomic_fixed2_shl_cpt_rev __kmpc_atomic_fixed2_shl_rev __kmpc_atomic_fixed2_shr __kmpc_atomic_fixed2_shr_cpt __kmpc_atomic_fixed2_shr_cpt_rev __kmpc_atomic_fixed2_shr_rev __kmpc_atomic_fixed2_sub __kmpc_atomic_fixed2_sub_cpt __kmpc_atomic_fixed2_sub_cpt_rev __kmpc_atomic_fixed2_sub_fp __kmpc_atomic_fixed2_sub_rev __kmpc_atomic_fixed2_swp __kmpc_atomic_fixed2_wr __kmpc_atomic_fixed2_xor __kmpc_atomic_fixed2_xor_cpt __kmpc_atomic_fixed2u_add_fp __kmpc_atomic_fixed2u_sub_fp __kmpc_atomic_fixed2u_mul_fp __kmpc_atomic_fixed2u_div __kmpc_atomic_fixed2u_div_cpt __kmpc_atomic_fixed2u_div_cpt_rev __kmpc_atomic_fixed2u_div_fp __kmpc_atomic_fixed2u_div_rev __kmpc_atomic_fixed2u_shr __kmpc_atomic_fixed2u_shr_cpt __kmpc_atomic_fixed2u_shr_cpt_rev __kmpc_atomic_fixed2u_shr_rev __kmpc_atomic_fixed4_add __kmpc_atomic_fixed4_add_cpt __kmpc_atomic_fixed4_add_fp __kmpc_atomic_fixed4_andb 
__kmpc_atomic_fixed4_andb_cpt __kmpc_atomic_fixed4_andl __kmpc_atomic_fixed4_andl_cpt __kmpc_atomic_fixed4_div __kmpc_atomic_fixed4_div_cpt __kmpc_atomic_fixed4_div_cpt_rev __kmpc_atomic_fixed4_div_float8 __kmpc_atomic_fixed4_div_fp __kmpc_atomic_fixed4_div_rev __kmpc_atomic_fixed4_eqv __kmpc_atomic_fixed4_eqv_cpt __kmpc_atomic_fixed4_max __kmpc_atomic_fixed4_max_cpt __kmpc_atomic_fixed4_min __kmpc_atomic_fixed4_min_cpt __kmpc_atomic_fixed4_mul __kmpc_atomic_fixed4_mul_cpt __kmpc_atomic_fixed4_mul_float8 __kmpc_atomic_fixed4_mul_fp __kmpc_atomic_fixed4_neqv __kmpc_atomic_fixed4_neqv_cpt __kmpc_atomic_fixed4_orb __kmpc_atomic_fixed4_orb_cpt __kmpc_atomic_fixed4_orl __kmpc_atomic_fixed4_orl_cpt __kmpc_atomic_fixed4_rd __kmpc_atomic_fixed4_shl __kmpc_atomic_fixed4_shl_cpt __kmpc_atomic_fixed4_shl_cpt_rev __kmpc_atomic_fixed4_shl_rev __kmpc_atomic_fixed4_shr __kmpc_atomic_fixed4_shr_cpt __kmpc_atomic_fixed4_shr_cpt_rev __kmpc_atomic_fixed4_shr_rev __kmpc_atomic_fixed4_sub __kmpc_atomic_fixed4_sub_cpt __kmpc_atomic_fixed4_sub_cpt_rev __kmpc_atomic_fixed4_sub_fp __kmpc_atomic_fixed4_sub_rev __kmpc_atomic_fixed4_swp __kmpc_atomic_fixed4_wr __kmpc_atomic_fixed4_xor __kmpc_atomic_fixed4_xor_cpt __kmpc_atomic_fixed4u_add_fp __kmpc_atomic_fixed4u_sub_fp __kmpc_atomic_fixed4u_mul_fp __kmpc_atomic_fixed4u_div __kmpc_atomic_fixed4u_div_cpt __kmpc_atomic_fixed4u_div_cpt_rev __kmpc_atomic_fixed4u_div_fp __kmpc_atomic_fixed4u_div_rev __kmpc_atomic_fixed4u_shr __kmpc_atomic_fixed4u_shr_cpt __kmpc_atomic_fixed4u_shr_cpt_rev __kmpc_atomic_fixed4u_shr_rev __kmpc_atomic_fixed8_add __kmpc_atomic_fixed8_add_cpt __kmpc_atomic_fixed8_add_fp __kmpc_atomic_fixed8_andb __kmpc_atomic_fixed8_andb_cpt __kmpc_atomic_fixed8_andl __kmpc_atomic_fixed8_andl_cpt __kmpc_atomic_fixed8_div __kmpc_atomic_fixed8_div_cpt __kmpc_atomic_fixed8_div_cpt_rev __kmpc_atomic_fixed8_div_float8 __kmpc_atomic_fixed8_div_fp __kmpc_atomic_fixed8_div_rev __kmpc_atomic_fixed8_eqv __kmpc_atomic_fixed8_eqv_cpt 
__kmpc_atomic_fixed8_max __kmpc_atomic_fixed8_max_cpt __kmpc_atomic_fixed8_min __kmpc_atomic_fixed8_min_cpt __kmpc_atomic_fixed8_mul __kmpc_atomic_fixed8_mul_cpt __kmpc_atomic_fixed8_mul_float8 __kmpc_atomic_fixed8_mul_fp __kmpc_atomic_fixed8_neqv __kmpc_atomic_fixed8_neqv_cpt __kmpc_atomic_fixed8_orb __kmpc_atomic_fixed8_orb_cpt __kmpc_atomic_fixed8_orl __kmpc_atomic_fixed8_orl_cpt __kmpc_atomic_fixed8_rd __kmpc_atomic_fixed8_shl __kmpc_atomic_fixed8_shl_cpt __kmpc_atomic_fixed8_shl_cpt_rev __kmpc_atomic_fixed8_shl_rev __kmpc_atomic_fixed8_shr __kmpc_atomic_fixed8_shr_cpt __kmpc_atomic_fixed8_shr_cpt_rev __kmpc_atomic_fixed8_shr_rev __kmpc_atomic_fixed8_sub __kmpc_atomic_fixed8_sub_cpt __kmpc_atomic_fixed8_sub_cpt_rev __kmpc_atomic_fixed8_sub_fp __kmpc_atomic_fixed8_sub_rev __kmpc_atomic_fixed8_swp __kmpc_atomic_fixed8_wr __kmpc_atomic_fixed8_xor __kmpc_atomic_fixed8_xor_cpt __kmpc_atomic_fixed8u_add_fp __kmpc_atomic_fixed8u_sub_fp __kmpc_atomic_fixed8u_mul_fp __kmpc_atomic_fixed8u_div __kmpc_atomic_fixed8u_div_cpt __kmpc_atomic_fixed8u_div_cpt_rev __kmpc_atomic_fixed8u_div_fp __kmpc_atomic_fixed8u_div_rev __kmpc_atomic_fixed8u_shr __kmpc_atomic_fixed8u_shr_cpt __kmpc_atomic_fixed8u_shr_cpt_rev __kmpc_atomic_fixed8u_shr_rev @endcode Functions for floating point ---------------------------- There are versions here for floating point numbers of size 4, 8, 10 and 16 bytes. (Ten byte floats are used by X87, but are now rare). 
@code __kmpc_atomic_float4_add __kmpc_atomic_float4_add_cpt __kmpc_atomic_float4_add_float8 __kmpc_atomic_float4_add_fp __kmpc_atomic_float4_div __kmpc_atomic_float4_div_cpt __kmpc_atomic_float4_div_cpt_rev __kmpc_atomic_float4_div_float8 __kmpc_atomic_float4_div_fp __kmpc_atomic_float4_div_rev __kmpc_atomic_float4_max __kmpc_atomic_float4_max_cpt __kmpc_atomic_float4_min __kmpc_atomic_float4_min_cpt __kmpc_atomic_float4_mul __kmpc_atomic_float4_mul_cpt __kmpc_atomic_float4_mul_float8 __kmpc_atomic_float4_mul_fp __kmpc_atomic_float4_rd __kmpc_atomic_float4_sub __kmpc_atomic_float4_sub_cpt __kmpc_atomic_float4_sub_cpt_rev __kmpc_atomic_float4_sub_float8 __kmpc_atomic_float4_sub_fp __kmpc_atomic_float4_sub_rev __kmpc_atomic_float4_swp __kmpc_atomic_float4_wr __kmpc_atomic_float8_add __kmpc_atomic_float8_add_cpt __kmpc_atomic_float8_add_fp __kmpc_atomic_float8_div __kmpc_atomic_float8_div_cpt __kmpc_atomic_float8_div_cpt_rev __kmpc_atomic_float8_div_fp __kmpc_atomic_float8_div_rev __kmpc_atomic_float8_max __kmpc_atomic_float8_max_cpt __kmpc_atomic_float8_min __kmpc_atomic_float8_min_cpt __kmpc_atomic_float8_mul __kmpc_atomic_float8_mul_cpt __kmpc_atomic_float8_mul_fp __kmpc_atomic_float8_rd __kmpc_atomic_float8_sub __kmpc_atomic_float8_sub_cpt __kmpc_atomic_float8_sub_cpt_rev __kmpc_atomic_float8_sub_fp __kmpc_atomic_float8_sub_rev __kmpc_atomic_float8_swp __kmpc_atomic_float8_wr __kmpc_atomic_float10_add __kmpc_atomic_float10_add_cpt __kmpc_atomic_float10_add_fp __kmpc_atomic_float10_div __kmpc_atomic_float10_div_cpt __kmpc_atomic_float10_div_cpt_rev __kmpc_atomic_float10_div_fp __kmpc_atomic_float10_div_rev __kmpc_atomic_float10_mul __kmpc_atomic_float10_mul_cpt __kmpc_atomic_float10_mul_fp __kmpc_atomic_float10_rd __kmpc_atomic_float10_sub __kmpc_atomic_float10_sub_cpt __kmpc_atomic_float10_sub_cpt_rev __kmpc_atomic_float10_sub_fp __kmpc_atomic_float10_sub_rev __kmpc_atomic_float10_swp __kmpc_atomic_float10_wr __kmpc_atomic_float16_add __kmpc_atomic_float16_add_cpt 
__kmpc_atomic_float16_div __kmpc_atomic_float16_div_cpt __kmpc_atomic_float16_div_cpt_rev __kmpc_atomic_float16_div_rev __kmpc_atomic_float16_max __kmpc_atomic_float16_max_cpt __kmpc_atomic_float16_min __kmpc_atomic_float16_min_cpt __kmpc_atomic_float16_mul __kmpc_atomic_float16_mul_cpt __kmpc_atomic_float16_rd __kmpc_atomic_float16_sub __kmpc_atomic_float16_sub_cpt __kmpc_atomic_float16_sub_cpt_rev __kmpc_atomic_float16_sub_rev __kmpc_atomic_float16_swp __kmpc_atomic_float16_wr @endcode Functions for Complex types --------------------------- Functions for complex types whose component floating point variables are of size 4,8,10 or 16 bytes. The names here are based on the size of the component float, *not* the size of the complex type. So `__kmpc_atomc_cmplx8_add` is an operation on a `complex<double>` or `complex(kind=8)`, *not* `complex<float>`. @code __kmpc_atomic_cmplx4_add __kmpc_atomic_cmplx4_add_cmplx8 __kmpc_atomic_cmplx4_add_cpt __kmpc_atomic_cmplx4_div __kmpc_atomic_cmplx4_div_cmplx8 __kmpc_atomic_cmplx4_div_cpt __kmpc_atomic_cmplx4_div_cpt_rev __kmpc_atomic_cmplx4_div_rev __kmpc_atomic_cmplx4_mul __kmpc_atomic_cmplx4_mul_cmplx8 __kmpc_atomic_cmplx4_mul_cpt __kmpc_atomic_cmplx4_rd __kmpc_atomic_cmplx4_sub __kmpc_atomic_cmplx4_sub_cmplx8 __kmpc_atomic_cmplx4_sub_cpt __kmpc_atomic_cmplx4_sub_cpt_rev __kmpc_atomic_cmplx4_sub_rev __kmpc_atomic_cmplx4_swp __kmpc_atomic_cmplx4_wr __kmpc_atomic_cmplx8_add __kmpc_atomic_cmplx8_add_cpt __kmpc_atomic_cmplx8_div __kmpc_atomic_cmplx8_div_cpt __kmpc_atomic_cmplx8_div_cpt_rev __kmpc_atomic_cmplx8_div_rev __kmpc_atomic_cmplx8_mul __kmpc_atomic_cmplx8_mul_cpt __kmpc_atomic_cmplx8_rd __kmpc_atomic_cmplx8_sub __kmpc_atomic_cmplx8_sub_cpt __kmpc_atomic_cmplx8_sub_cpt_rev __kmpc_atomic_cmplx8_sub_rev __kmpc_atomic_cmplx8_swp __kmpc_atomic_cmplx8_wr __kmpc_atomic_cmplx10_add __kmpc_atomic_cmplx10_add_cpt __kmpc_atomic_cmplx10_div __kmpc_atomic_cmplx10_div_cpt __kmpc_atomic_cmplx10_div_cpt_rev __kmpc_atomic_cmplx10_div_rev 
__kmpc_atomic_cmplx10_mul
__kmpc_atomic_cmplx10_mul_cpt
__kmpc_atomic_cmplx10_rd
__kmpc_atomic_cmplx10_sub
__kmpc_atomic_cmplx10_sub_cpt
__kmpc_atomic_cmplx10_sub_cpt_rev
__kmpc_atomic_cmplx10_sub_rev
__kmpc_atomic_cmplx10_swp
__kmpc_atomic_cmplx10_wr
__kmpc_atomic_cmplx16_add
__kmpc_atomic_cmplx16_add_cpt
__kmpc_atomic_cmplx16_div
__kmpc_atomic_cmplx16_div_cpt
__kmpc_atomic_cmplx16_div_cpt_rev
__kmpc_atomic_cmplx16_div_rev
__kmpc_atomic_cmplx16_mul
__kmpc_atomic_cmplx16_mul_cpt
__kmpc_atomic_cmplx16_rd
__kmpc_atomic_cmplx16_sub
__kmpc_atomic_cmplx16_sub_cpt
__kmpc_atomic_cmplx16_sub_cpt_rev
__kmpc_atomic_cmplx16_swp
__kmpc_atomic_cmplx16_wr
@endcode
*/

/*!
@ingroup ATOMIC_OPS
@{
*/

/*
 * Global vars
 */

// Atomic-mode selector: 1 = Intel perf path, 2 = GOMP-compatible path that
// funnels user-coded atomics through the shared __kmp_atomic_lock.
#ifndef KMP_GOMP_COMPAT
int __kmp_atomic_mode = 1; // Intel perf
#else
int __kmp_atomic_mode = 2; // GOMP compatibility
#endif /* KMP_GOMP_COMPAT */

// Locks serializing atomic updates that have no lock-free fast path.
// One lock per operand size/kind so unrelated types do not contend.
KMP_ALIGN(128)

kmp_atomic_lock_t __kmp_atomic_lock; /* Control access to all user coded atomics in Gnu compat mode */
kmp_atomic_lock_t __kmp_atomic_lock_1i; /* Control access to all user coded atomics for 1-byte fixed data types */
kmp_atomic_lock_t __kmp_atomic_lock_2i; /* Control access to all user coded atomics for 2-byte fixed data types */
kmp_atomic_lock_t __kmp_atomic_lock_4i; /* Control access to all user coded atomics for 4-byte fixed data types */
kmp_atomic_lock_t __kmp_atomic_lock_4r; /* Control access to all user coded atomics for kmp_real32 data type */
kmp_atomic_lock_t __kmp_atomic_lock_8i; /* Control access to all user coded atomics for 8-byte fixed data types */
kmp_atomic_lock_t __kmp_atomic_lock_8r; /* Control access to all user coded atomics for kmp_real64 data type */
kmp_atomic_lock_t __kmp_atomic_lock_8c; /* Control access to all user coded atomics for complex byte data type */
kmp_atomic_lock_t __kmp_atomic_lock_10r; /* Control access to all user coded atomics for long double data type */
kmp_atomic_lock_t __kmp_atomic_lock_16r; /* Control access to all user coded atomics for _Quad data type */
kmp_atomic_lock_t __kmp_atomic_lock_16c; /* Control access to all user coded atomics for double complex data type*/
kmp_atomic_lock_t __kmp_atomic_lock_20c; /* Control access to all user coded atomics for long double complex type*/
kmp_atomic_lock_t __kmp_atomic_lock_32c; /* Control access to all user coded atomics for _Quad complex data type */

/*
 2007-03-02:
 Without "volatile" specifier in OP_CMPXCHG and MIN_MAX_CMPXCHG we have a
 bug on *_32 and *_32e. This is just a temporary workaround for the problem.
 It seems the right solution is writing OP_CMPXCHG and MIN_MAX_CMPXCHG
 routines in assembler language.
*/
#define KMP_ATOMIC_VOLATILE volatile

#if ( KMP_ARCH_X86 ) && KMP_HAVE_QUAD

// Arithmetic/comparison helpers for the aligned _Quad and _Quad-complex
// wrapper structs; each simply forwards to the wrapped .q member.
// NOTE(review): operator overloading implies this translation unit is
// compiled as C++ despite the .c extension.
static inline void operator +=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q += rhs.q; };
static inline void operator -=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q -= rhs.q; };
static inline void operator *=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q *= rhs.q; };
static inline void operator /=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q /= rhs.q; };
static inline bool operator < ( Quad_a4_t & lhs, Quad_a4_t & rhs ) { return lhs.q < rhs.q; }
static inline bool operator > ( Quad_a4_t & lhs, Quad_a4_t & rhs ) { return lhs.q > rhs.q; }

static inline void operator +=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q += rhs.q; };
static inline void operator -=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q -= rhs.q; };
static inline void operator *=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q *= rhs.q; };
static inline void operator /=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q /= rhs.q; };
static inline bool operator < ( Quad_a16_t & lhs, Quad_a16_t & rhs ) { return lhs.q < rhs.q; }
static inline bool operator > ( Quad_a16_t & lhs, Quad_a16_t & rhs ) { return lhs.q > rhs.q; }

static inline void operator +=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q += rhs.q; };
static inline void operator -=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q -= rhs.q; };
static inline void operator *=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q *= rhs.q; };
static inline void operator /=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q /= rhs.q; };

static inline void operator +=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q += rhs.q; };
static inline void operator -=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q -= rhs.q; };
static inline void operator *=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q *= rhs.q; };
static inline void operator /=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q /= rhs.q; };

#endif

/* ------------------------------------------------------------------------ */
/* ATOMIC implementation routines                                           */
/* one routine for each operation and operand type                          */
/* ------------------------------------------------------------------------ */

// All routines declarations looks like
// void __kmpc_atomic_RTYPE_OP( ident_t*, int, TYPE *lhs, TYPE rhs );
// ------------------------------------------------------------------------

// Lazily resolve the global thread id when the caller passed
// KMP_GTID_UNKNOWN (happens on the GOMP-compat path).
#define KMP_CHECK_GTID \
if ( gtid == KMP_GTID_UNKNOWN ) { \
    gtid = __kmp_entry_gtid(); \
} // check and get gtid when needed

// Beginning of a definition (provides name, parameters, debug trace)
// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operands' type
#define ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE, RET_TYPE) \
RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \
{ \
    KMP_DEBUG_ASSERT( __kmp_init_serial ); \
    KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));

// ------------------------------------------------------------------------
// Lock variables used for critical sections for various size operands
#define ATOMIC_LOCK0 __kmp_atomic_lock // all types, for Gnu compat
#define ATOMIC_LOCK1i __kmp_atomic_lock_1i // char
#define ATOMIC_LOCK2i __kmp_atomic_lock_2i // short
#define ATOMIC_LOCK4i __kmp_atomic_lock_4i // long int
#define ATOMIC_LOCK4r __kmp_atomic_lock_4r // float
#define ATOMIC_LOCK8i __kmp_atomic_lock_8i // long long int
#define ATOMIC_LOCK8r __kmp_atomic_lock_8r // double
#define ATOMIC_LOCK8c __kmp_atomic_lock_8c // float complex
#define ATOMIC_LOCK10r __kmp_atomic_lock_10r // long double
#define ATOMIC_LOCK16r __kmp_atomic_lock_16r // _Quad
#define ATOMIC_LOCK16c __kmp_atomic_lock_16c // double complex
#define ATOMIC_LOCK20c __kmp_atomic_lock_20c // long double complex
#define ATOMIC_LOCK32c __kmp_atomic_lock_32c // _Quad complex

// ------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
// OP - operator (it's supposed to contain an assignment)
// LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
#define OP_CRITICAL(OP,LCK_ID) \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    \
    (*lhs) OP (rhs); \
    \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );

// ------------------------------------------------------------------------
// For GNU compatibility, we may need to use a critical section,
// even though it is not required by the ISA.
//
// On IA-32 architecture, all atomic operations except for fixed 4 byte add,
// sub, and bitwise logical ops, and 1 & 2 byte logical ops use a common
// critical section. On Intel(R) 64, all atomic operations are done with fetch
// and add or compare and exchange. Therefore, the FLAG parameter to this
// macro is either KMP_ARCH_X86 or 0 (or 1, for Intel-specific extension which
// require a critical section, where we predict that they will be implemented
// in the Gnu codegen by calling GOMP_atomic_start() / GOMP_atomic_end()).
//
// When the OP_GOMP_CRITICAL macro is used in a *CRITICAL* macro construct,
// the FLAG parameter should always be 1. If we know that we will be using
// a critical section, then we want to make certain that we use the generic
// lock __kmp_atomic_lock to protect the atomic update, and not one of the
// locks that are specialized based upon the size or type of the data.
//
// If FLAG is 0, then we are relying on dead code elimination by the build
// compiler to get rid of the useless block of code, and save a needless
// branch at runtime.
//

#ifdef KMP_GOMP_COMPAT
# define OP_GOMP_CRITICAL(OP,FLAG) \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
        KMP_CHECK_GTID; \
        OP_CRITICAL( OP, 0 ); \
        return; \
    }
# else
# define OP_GOMP_CRITICAL(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */

#if KMP_MIC
# define KMP_DO_PAUSE _mm_delay_32( 1 )
#else
# define KMP_DO_PAUSE KMP_CPU_PAUSE()
#endif /* KMP_MIC */

// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
// TYPE - operands' type
// BITS - size in bits, used to distinguish low level calls
// OP - operator
// The loop re-reads *lhs and retries until the CAS succeeds (classic
// read-modify-CAS pattern).
#define OP_CMPXCHG(TYPE,BITS,OP) \
    { \
        TYPE old_value, new_value; \
        old_value = *(TYPE volatile *)lhs; \
        new_value = old_value OP rhs; \
        while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
                    *VOLATILE_CAST(kmp_int##BITS *) &old_value, \
                    *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
        { \
            KMP_DO_PAUSE; \
            \
            old_value = *(TYPE volatile *)lhs; \
            new_value = old_value OP rhs; \
        } \
    }

#if USE_CMPXCHG_FIX
// 2007-06-25:
// workaround for C78287 (complex(kind=4) data type)
// lin_32, lin_32e, win_32 and win_32e are affected (I verified the asm)
// Compiler ignores the volatile qualifier of the temp_val in the OP_CMPXCHG macro.
// This is a problem of the compiler.
// Related tracker is C76005, targeted to 11.0.
// I verified the asm of the workaround.
#define OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \
    { \
        struct _sss { \
            TYPE cmp; \
            kmp_int##BITS *vvv; \
        }; \
        struct _sss old_value, new_value; \
        old_value.vvv = ( kmp_int##BITS * )&old_value.cmp; \
        new_value.vvv = ( kmp_int##BITS * )&new_value.cmp; \
        *old_value.vvv = * ( volatile kmp_int##BITS * ) lhs; \
        new_value.cmp = old_value.cmp OP rhs; \
        while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
                    *VOLATILE_CAST(kmp_int##BITS *) old_value.vvv, \
                    *VOLATILE_CAST(kmp_int##BITS *) new_value.vvv ) ) \
        { \
            KMP_DO_PAUSE; \
            \
            *old_value.vvv = * ( volatile kmp_int##BITS * ) lhs; \
            new_value.cmp = old_value.cmp OP rhs; \
        } \
    }
// end of the first part of the workaround for C78287
#endif // USE_CMPXCHG_FIX

#if KMP_ARCH_X86 || KMP_ARCH_X86_64

// ------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ====================================
#define ATOMIC_FIXED_ADD(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
    /* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \
    KMP_TEST_THEN_ADD##BITS( lhs, OP rhs ); \
}
// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
    OP_CMPXCHG(TYPE,BITS,OP) \
}
#if USE_CMPXCHG_FIX
// -------------------------------------------------------------------------
// workaround for C78287 (complex(kind=4) data type)
#define ATOMIC_CMPXCHG_WORKAROUND(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
    OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \
}
// end of the second part of the workaround for C78287
#endif

#else
// -------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
// Unaligned operands fall back to the size-specific critical section.
#define ATOMIC_FIXED_ADD(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
    if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
        /* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \
        KMP_TEST_THEN_ADD##BITS( lhs, OP rhs ); \
    } else { \
        KMP_CHECK_GTID; \
        OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \
    } \
}
// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
    if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
        OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
    } else { \
        KMP_CHECK_GTID; \
        OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \
    } \
}
#if USE_CMPXCHG_FIX
// -------------------------------------------------------------------------
// workaround for C78287 (complex(kind=4) data type)
#define ATOMIC_CMPXCHG_WORKAROUND(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
    if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
        OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
    } else { \
        KMP_CHECK_GTID; \
        OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \
    } \
}
// end of the second part of the workaround for C78287
#endif // USE_CMPXCHG_FIX

#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

// Routines for ATOMIC 4-byte operands addition and subtraction
ATOMIC_FIXED_ADD( fixed4, add, kmp_int32, 32, +, 4i, 3, 0 ) // __kmpc_atomic_fixed4_add
ATOMIC_FIXED_ADD( fixed4, sub, kmp_int32, 32, -, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub

ATOMIC_CMPXCHG( float4, add, kmp_real32, 32, +, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add
ATOMIC_CMPXCHG( float4, sub, kmp_real32, 32, -, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub

// Routines for ATOMIC 8-byte operands addition and subtraction
ATOMIC_FIXED_ADD( fixed8, add, kmp_int64, 64, +, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add
ATOMIC_FIXED_ADD( fixed8, sub, kmp_int64, 64, -, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub

ATOMIC_CMPXCHG( float8, add, kmp_real64, 64, +, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add
ATOMIC_CMPXCHG( float8, sub, kmp_real64, 64, -, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub

// ------------------------------------------------------------------------
// Entries definition for integer operands
// TYPE_ID - operands type and size (fixed4, float4)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operand type
// BITS - size in bits, used to distinguish low level calls
// OP - operator (used in critical section)
// LCK_ID - lock identifier, used to possibly distinguish lock variable
// MASK - used for alignment check
//              TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID,MASK,GOMP_FLAG
// ------------------------------------------------------------------------
// Routines for ATOMIC integer operands, other operators
// ------------------------------------------------------------------------
// Each ATOMIC_CMPXCHG expansion below generates one __kmpc_atomic_* entry
// point (named in the trailing comment) that applies OP atomically via a
// compare-and-store loop (critical section on unaligned/non-x86 paths).
//              TYPE_ID,OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG
ATOMIC_CMPXCHG( fixed1,  add,  kmp_int8,    8, +,  1i, 0, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_add
ATOMIC_CMPXCHG( fixed1, andb,  kmp_int8,    8, &,  1i, 0, 0            )  // __kmpc_atomic_fixed1_andb
ATOMIC_CMPXCHG( fixed1,  div,  kmp_int8,    8, /,  1i, 0, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_div
ATOMIC_CMPXCHG( fixed1u, div,  kmp_uint8,   8, /,  1i, 0, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1u_div
ATOMIC_CMPXCHG( fixed1,  mul,  kmp_int8,    8, *,  1i, 0, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_mul
ATOMIC_CMPXCHG( fixed1,  orb,  kmp_int8,    8, |,  1i, 0, 0            )  // __kmpc_atomic_fixed1_orb
ATOMIC_CMPXCHG( fixed1,  shl,  kmp_int8,    8, <<, 1i, 0, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_shl
ATOMIC_CMPXCHG( fixed1,  shr,  kmp_int8,    8, >>, 1i, 0, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_shr
ATOMIC_CMPXCHG( fixed1u, shr,  kmp_uint8,   8, >>, 1i, 0, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1u_shr
ATOMIC_CMPXCHG( fixed1,  sub,  kmp_int8,    8, -,  1i, 0, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_sub
ATOMIC_CMPXCHG( fixed1,  xor,  kmp_int8,    8, ^,  1i, 0, 0            )  // __kmpc_atomic_fixed1_xor
ATOMIC_CMPXCHG( fixed2,  add,  kmp_int16,  16, +,  2i, 1, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_add
ATOMIC_CMPXCHG( fixed2, andb,  kmp_int16,  16, &,  2i, 1, 0            )  // __kmpc_atomic_fixed2_andb
ATOMIC_CMPXCHG( fixed2,  div,  kmp_int16,  16, /,  2i, 1, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_div
ATOMIC_CMPXCHG( fixed2u, div,  kmp_uint16, 16, /,  2i, 1, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2u_div
ATOMIC_CMPXCHG( fixed2,  mul,  kmp_int16,  16, *,  2i, 1, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_mul
ATOMIC_CMPXCHG( fixed2,  orb,  kmp_int16,  16, |,  2i, 1, 0            )  // __kmpc_atomic_fixed2_orb
ATOMIC_CMPXCHG( fixed2,  shl,  kmp_int16,  16, <<, 2i, 1, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_shl
ATOMIC_CMPXCHG( fixed2,  shr,  kmp_int16,  16, >>, 2i, 1, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_shr
ATOMIC_CMPXCHG( fixed2u, shr,  kmp_uint16, 16, >>, 2i, 1, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2u_shr
ATOMIC_CMPXCHG( fixed2,  sub,  kmp_int16,  16, -,  2i, 1, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_sub
ATOMIC_CMPXCHG( fixed2,  xor,  kmp_int16,  16, ^,  2i, 1, 0            )  // __kmpc_atomic_fixed2_xor
ATOMIC_CMPXCHG( fixed4, andb,  kmp_int32,  32, &,  4i, 3, 0            )  // __kmpc_atomic_fixed4_andb
ATOMIC_CMPXCHG( fixed4,  div,  kmp_int32,  32, /,  4i, 3, KMP_ARCH_X86 )  // __kmpc_atomic_fixed4_div
ATOMIC_CMPXCHG( fixed4u, div,  kmp_uint32, 32, /,  4i, 3, KMP_ARCH_X86 )  // __kmpc_atomic_fixed4u_div
ATOMIC_CMPXCHG( fixed4,  mul,  kmp_int32,  32, *,  4i, 3, KMP_ARCH_X86 )  // __kmpc_atomic_fixed4_mul
ATOMIC_CMPXCHG( fixed4,  orb,  kmp_int32,  32, |,  4i, 3, 0            )  // __kmpc_atomic_fixed4_orb
ATOMIC_CMPXCHG( fixed4,  shl,  kmp_int32,  32, <<, 4i, 3, KMP_ARCH_X86 )  // __kmpc_atomic_fixed4_shl
ATOMIC_CMPXCHG( fixed4,  shr,  kmp_int32,  32, >>, 4i, 3, KMP_ARCH_X86 )  // __kmpc_atomic_fixed4_shr
ATOMIC_CMPXCHG( fixed4u, shr,  kmp_uint32, 32, >>, 4i, 3, KMP_ARCH_X86 )  // __kmpc_atomic_fixed4u_shr
ATOMIC_CMPXCHG( fixed4,  xor,  kmp_int32,  32, ^,  4i, 3, 0            )  // __kmpc_atomic_fixed4_xor
ATOMIC_CMPXCHG( fixed8, andb,  kmp_int64,  64, &,  8i, 7, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_andb
ATOMIC_CMPXCHG( fixed8,  div,  kmp_int64,  64, /,  8i, 7, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_div
ATOMIC_CMPXCHG( fixed8u, div,  kmp_uint64, 64, /,  8i, 7, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8u_div
ATOMIC_CMPXCHG( fixed8,  mul,  kmp_int64,  64, *,  8i, 7, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_mul
ATOMIC_CMPXCHG( fixed8,  orb,  kmp_int64,  64, |,  8i, 7, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_orb
ATOMIC_CMPXCHG( fixed8,  shl,  kmp_int64,  64, <<, 8i, 7, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_shl
ATOMIC_CMPXCHG( fixed8,  shr,  kmp_int64,  64, >>, 8i, 7, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_shr
ATOMIC_CMPXCHG( fixed8u, shr,  kmp_uint64, 64, >>, 8i, 7, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8u_shr
ATOMIC_CMPXCHG( fixed8,  xor,  kmp_int64,  64, ^,  8i, 7, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_xor
ATOMIC_CMPXCHG( float4,  div,  kmp_real32, 32, /,  4r, 3, KMP_ARCH_X86 )  // __kmpc_atomic_float4_div
ATOMIC_CMPXCHG( float4,  mul,  kmp_real32, 32, *,  4r, 3, KMP_ARCH_X86 )  // __kmpc_atomic_float4_mul
ATOMIC_CMPXCHG( float8,  div,  kmp_real64, 64, /,  8r, 7, KMP_ARCH_X86 )  // __kmpc_atomic_float8_div
ATOMIC_CMPXCHG( float8,  mul,  kmp_real64, 64, *,  8r, 7, KMP_ARCH_X86 )  // __kmpc_atomic_float8_mul
//              TYPE_ID,OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG


/* ------------------------------------------------------------------------ */
/* Routines for C/C++ Reduction operators && and ||                         */
/* ------------------------------------------------------------------------ */

// ------------------------------------------------------------------------
// Need separate macros for &&, || because there is no combined assignment
//   (i.e. no "&&=" / "||=" operator, so the OP##= trick used above cannot
//   be applied; the update is spelled "*lhs = *lhs OP rhs" instead).
//   TODO: eliminate ATOMIC_CRIT_{L,EQV} macros as not used
#define ATOMIC_CRIT_L(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
    ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL( = *lhs OP, GOMP_FLAG ) \
    OP_CRITICAL( = *lhs OP, LCK_ID ) \
}

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// ------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ===================================
#define ATOMIC_CMPX_L(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
    ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL( = *lhs OP, GOMP_FLAG ) \
    OP_CMPXCHG(TYPE,BITS,OP) \
}
#else
// ------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
// Non-x86 variant: fall back to a per-lock critical section when the
// operand address is not aligned to (MASK+1) bytes.
#define ATOMIC_CMPX_L(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
    ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL(= *lhs OP,GOMP_FLAG) \
    if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
        OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
    } else { \
        KMP_CHECK_GTID; \
        OP_CRITICAL(= *lhs OP,LCK_ID) /* unaligned - use critical */ \
    } \
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

ATOMIC_CMPX_L( fixed1, andl, char,       8, &&, 1i, 0, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_andl
ATOMIC_CMPX_L( fixed1,  orl, char,       8, ||, 1i, 0, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_orl
ATOMIC_CMPX_L( fixed2, andl, short,     16, &&, 2i, 1, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_andl
ATOMIC_CMPX_L( fixed2,  orl, short,     16, ||, 2i, 1, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_orl
ATOMIC_CMPX_L( fixed4, andl, kmp_int32, 32, &&, 4i, 3, 0            )  // __kmpc_atomic_fixed4_andl
ATOMIC_CMPX_L( fixed4,  orl, kmp_int32, 32, ||, 4i, 3, 0            )  // __kmpc_atomic_fixed4_orl
ATOMIC_CMPX_L( fixed8, andl, kmp_int64, 64, &&, 8i, 7, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_andl
ATOMIC_CMPX_L( fixed8,  orl, kmp_int64, 64, ||, 8i, 7, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_orl


/* ------------------------------------------------------------------------- */
/* Routines for Fortran operators that matched no one in C:                  */
/* MAX, MIN, .EQV., .NEQV.                                                   */
/* Operators .AND., .OR. are covered by __kmpc_atomic_*_{andl,orl}           */
/* Intrinsics IAND, IOR, IEOR are covered by __kmpc_atomic_*_{andb,orb,xor}  */
/* ------------------------------------------------------------------------- */

// -------------------------------------------------------------------------
// MIN and MAX need separate macros
// OP - operator to check if we need any actions?
//   The comparison "*lhs OP rhs" (OP is < for max, > for min) tells whether
//   the stored value still has to be replaced by rhs.
#define MIN_MAX_CRITSECT(OP,LCK_ID) \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    \
    if ( *lhs OP rhs ) {                 /* still need actions? */ \
        *lhs = rhs; \
    } \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );

// -------------------------------------------------------------------------
// GOMP compatibility: when flagged and __kmp_atomic_mode == 2, perform the
// whole update under the global (index 0) atomic lock and return early.
#ifdef KMP_GOMP_COMPAT
#define GOMP_MIN_MAX_CRITSECT(OP,FLAG) \
    if (( FLAG ) && ( __kmp_atomic_mode == 2 )) { \
        KMP_CHECK_GTID; \
        MIN_MAX_CRITSECT( OP, 0 ); \
        return; \
    }
#else
#define GOMP_MIN_MAX_CRITSECT(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */

// -------------------------------------------------------------------------
// Lock-free MIN/MAX update: retry compare-and-store until either the store
// succeeds or another thread has already installed a value that makes the
// update unnecessary (the "old_value OP rhs" test fails).
#define MIN_MAX_CMPXCHG(TYPE,BITS,OP) \
    { \
        TYPE KMP_ATOMIC_VOLATILE temp_val; \
        TYPE old_value; \
        temp_val = *lhs; \
        old_value = temp_val; \
        while ( old_value OP rhs &&          /* still need actions? */ \
            ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value, \
                      *VOLATILE_CAST(kmp_int##BITS *) &rhs ) ) \
        { \
            KMP_CPU_PAUSE(); \
            temp_val = *lhs; \
            old_value = temp_val; \
        } \
    }

// -------------------------------------------------------------------------
// 1-byte, 2-byte operands - use critical section
#define MIN_MAX_CRITICAL(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
    ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    if ( *lhs OP rhs ) {     /* need actions? */ \
        GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \
        MIN_MAX_CRITSECT(OP,LCK_ID) \
    } \
}

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// -------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ====================================
#define MIN_MAX_COMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
    ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    if ( *lhs OP rhs ) { \
        GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \
        MIN_MAX_CMPXCHG(TYPE,BITS,OP) \
    } \
}
#else
// -------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
#define MIN_MAX_COMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
    ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    if ( *lhs OP rhs ) { \
        GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \
        if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
            MIN_MAX_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
        } else { \
            KMP_CHECK_GTID; \
            MIN_MAX_CRITSECT(OP,LCK_ID)   /* unaligned address */ \
        } \
    } \
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

MIN_MAX_COMPXCHG( fixed1, max, char,        8, <, 1i, 0, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_max
MIN_MAX_COMPXCHG( fixed1, min, char,        8, >, 1i, 0, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_min
MIN_MAX_COMPXCHG( fixed2, max, short,      16, <, 2i, 1, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_max
MIN_MAX_COMPXCHG( fixed2, min, short,      16, >, 2i, 1, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_min
MIN_MAX_COMPXCHG( fixed4, max, kmp_int32,  32, <, 4i, 3, 0            )  // __kmpc_atomic_fixed4_max
MIN_MAX_COMPXCHG( fixed4, min, kmp_int32,  32, >, 4i, 3, 0            )  // __kmpc_atomic_fixed4_min
MIN_MAX_COMPXCHG( fixed8, max, kmp_int64,  64, <, 8i, 7, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_max
MIN_MAX_COMPXCHG( fixed8, min, kmp_int64,  64, >, 8i, 7, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_min
MIN_MAX_COMPXCHG( float4, max, kmp_real32, 32, <, 4r, 3, KMP_ARCH_X86 )  // __kmpc_atomic_float4_max
MIN_MAX_COMPXCHG( float4, min, kmp_real32, 32, >, 4r, 3, KMP_ARCH_X86 )  // __kmpc_atomic_float4_min
MIN_MAX_COMPXCHG( float8, max, kmp_real64, 64, <, 8r, 7, KMP_ARCH_X86 )  // __kmpc_atomic_float8_max
MIN_MAX_COMPXCHG( float8, min, kmp_real64, 64, >, 8r, 7, KMP_ARCH_X86 )  // __kmpc_atomic_float8_min
#if KMP_HAVE_QUAD
MIN_MAX_CRITICAL( float16, max,     QUAD_LEGACY, <, 16r, 1 )  // __kmpc_atomic_float16_max
MIN_MAX_CRITICAL( float16, min,     QUAD_LEGACY, >, 16r, 1 )  // __kmpc_atomic_float16_min
#if ( KMP_ARCH_X86 )
MIN_MAX_CRITICAL( float16, max_a16, Quad_a16_t,  <, 16r, 1 )  // __kmpc_atomic_float16_max_a16
MIN_MAX_CRITICAL( float16, min_a16, Quad_a16_t,  >, 16r, 1 )  // __kmpc_atomic_float16_min_a16
#endif
#endif
// ------------------------------------------------------------------------
// Need separate macros for .EQV.
// because of the need of complement (~)
// OP ignored for critical sections, ^=~ used instead
//   (.EQV. is "not xor": *lhs = *lhs ^ ~rhs, hence the "^=~" operator text
//   passed straight through to the critical-section macros.)
#define ATOMIC_CRIT_EQV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
    ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL(^=~,GOMP_FLAG)    /* send assignment */ \
    OP_CRITICAL(^=~,LCK_ID)            /* send assignment and complement */ \
}

// ------------------------------------------------------------------------
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// ------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ===================================
#define ATOMIC_CMPX_EQV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
    ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL(^=~,GOMP_FLAG)    /* send assignment */ \
    OP_CMPXCHG(TYPE,BITS,OP) \
}
// ------------------------------------------------------------------------
#else
// ------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
#define ATOMIC_CMPX_EQV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
    ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL(^=~,GOMP_FLAG) \
    if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
        OP_CMPXCHG(TYPE,BITS,OP)  /* aligned address */ \
    } else { \
        KMP_CHECK_GTID; \
        OP_CRITICAL(^=~,LCK_ID)   /* unaligned address - use critical */ \
    } \
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

// .NEQV. is plain xor; .EQV. uses the "^~" operator via ATOMIC_CMPX_EQV.
ATOMIC_CMPXCHG( fixed1, neqv, kmp_int8,   8, ^,  1i, 0, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_neqv
ATOMIC_CMPXCHG( fixed2, neqv, kmp_int16, 16, ^,  2i, 1, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_neqv
ATOMIC_CMPXCHG( fixed4, neqv, kmp_int32, 32, ^,  4i, 3, KMP_ARCH_X86 )  // __kmpc_atomic_fixed4_neqv
ATOMIC_CMPXCHG( fixed8, neqv, kmp_int64, 64, ^,  8i, 7, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_neqv
ATOMIC_CMPX_EQV( fixed1, eqv, kmp_int8,   8, ^~, 1i, 0, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_eqv
ATOMIC_CMPX_EQV( fixed2, eqv, kmp_int16, 16, ^~, 2i, 1, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_eqv
ATOMIC_CMPX_EQV( fixed4, eqv, kmp_int32, 32, ^~, 4i, 3, KMP_ARCH_X86 )  // __kmpc_atomic_fixed4_eqv
ATOMIC_CMPX_EQV( fixed8, eqv, kmp_int64, 64, ^~, 8i, 7, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_eqv


// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
//     TYPE_ID, OP_ID, TYPE - detailed above
//     OP - operator
//     LCK_ID - lock identifier, used to possibly distinguish lock variable
#define ATOMIC_CRITICAL(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
    ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL(OP##=,GOMP_FLAG)  /* send assignment */ \
    OP_CRITICAL(OP##=,LCK_ID)          /* send assignment */ \
}

/* ------------------------------------------------------------------------- */
// routines for long double type
ATOMIC_CRITICAL( float10, add, long double, +, 10r, 1 )  // __kmpc_atomic_float10_add
ATOMIC_CRITICAL( float10, sub, long double, -, 10r, 1 )  // __kmpc_atomic_float10_sub
ATOMIC_CRITICAL( float10, mul, long double, *, 10r, 1 )  // __kmpc_atomic_float10_mul
ATOMIC_CRITICAL( float10, div, long double, /, 10r, 1 )  // __kmpc_atomic_float10_div
#if KMP_HAVE_QUAD
// routines for _Quad type
ATOMIC_CRITICAL( float16, add, QUAD_LEGACY, +, 16r, 1 )  // __kmpc_atomic_float16_add
ATOMIC_CRITICAL( float16, sub, QUAD_LEGACY, -, 16r, 1 )  // __kmpc_atomic_float16_sub
ATOMIC_CRITICAL( float16, mul, QUAD_LEGACY, *, 16r, 1 )  // __kmpc_atomic_float16_mul
ATOMIC_CRITICAL( float16, div, QUAD_LEGACY, /, 16r, 1 )  // __kmpc_atomic_float16_div
#if ( KMP_ARCH_X86 )
// 16-byte-aligned _Quad variants
ATOMIC_CRITICAL( float16, add_a16, Quad_a16_t, +, 16r, 1 )  // __kmpc_atomic_float16_add_a16
ATOMIC_CRITICAL( float16, sub_a16, Quad_a16_t, -, 16r, 1 )  // __kmpc_atomic_float16_sub_a16
ATOMIC_CRITICAL( float16, mul_a16, Quad_a16_t, *, 16r, 1 )  // __kmpc_atomic_float16_mul_a16
ATOMIC_CRITICAL( float16, div_a16, Quad_a16_t, /, 16r, 1 )  // __kmpc_atomic_float16_div_a16
#endif
#endif
// routines for complex types
#if USE_CMPXCHG_FIX
// workaround for C78287 (complex(kind=4) data type)
ATOMIC_CMPXCHG_WORKAROUND( cmplx4, add, kmp_cmplx32, 64, +, 8c, 7, 1 )  // __kmpc_atomic_cmplx4_add
ATOMIC_CMPXCHG_WORKAROUND( cmplx4, sub, kmp_cmplx32, 64, -, 8c, 7, 1 )  // __kmpc_atomic_cmplx4_sub
ATOMIC_CMPXCHG_WORKAROUND( cmplx4, mul, kmp_cmplx32, 64, *, 8c, 7, 1 )  // __kmpc_atomic_cmplx4_mul
ATOMIC_CMPXCHG_WORKAROUND( cmplx4, div, kmp_cmplx32, 64, /, 8c, 7, 1 )  // __kmpc_atomic_cmplx4_div
// end of the workaround for C78287
#else
ATOMIC_CRITICAL( cmplx4, add, kmp_cmplx32, +, 8c, 1 )  // __kmpc_atomic_cmplx4_add
ATOMIC_CRITICAL( cmplx4, sub, kmp_cmplx32, -, 8c, 1 )  // __kmpc_atomic_cmplx4_sub
ATOMIC_CRITICAL( cmplx4, mul, kmp_cmplx32, *, 8c, 1 )  // __kmpc_atomic_cmplx4_mul
ATOMIC_CRITICAL( cmplx4, div, kmp_cmplx32, /, 8c, 1 )  // __kmpc_atomic_cmplx4_div
#endif // USE_CMPXCHG_FIX

ATOMIC_CRITICAL( cmplx8,  add, kmp_cmplx64, +, 16c, 1 )  // __kmpc_atomic_cmplx8_add
ATOMIC_CRITICAL( cmplx8,  sub, kmp_cmplx64, -, 16c, 1 )  // __kmpc_atomic_cmplx8_sub
ATOMIC_CRITICAL( cmplx8,  mul, kmp_cmplx64, *, 16c, 1 )  // __kmpc_atomic_cmplx8_mul
ATOMIC_CRITICAL( cmplx8,  div, kmp_cmplx64, /, 16c, 1 )  // __kmpc_atomic_cmplx8_div
ATOMIC_CRITICAL( cmplx10, add, kmp_cmplx80, +, 20c, 1 )  // __kmpc_atomic_cmplx10_add
ATOMIC_CRITICAL( cmplx10, sub, kmp_cmplx80, -, 20c, 1 )  // __kmpc_atomic_cmplx10_sub
ATOMIC_CRITICAL( cmplx10, mul, kmp_cmplx80, *, 20c, 1 )  // __kmpc_atomic_cmplx10_mul
ATOMIC_CRITICAL( cmplx10, div, kmp_cmplx80, /, 20c, 1 )  // __kmpc_atomic_cmplx10_div
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL( cmplx16, add, CPLX128_LEG, +, 32c, 1 )  // __kmpc_atomic_cmplx16_add
ATOMIC_CRITICAL( cmplx16, sub, CPLX128_LEG, -, 32c, 1 )  // __kmpc_atomic_cmplx16_sub
ATOMIC_CRITICAL( cmplx16, mul, CPLX128_LEG, *, 32c, 1 )  // __kmpc_atomic_cmplx16_mul
ATOMIC_CRITICAL( cmplx16, div, CPLX128_LEG, /, 32c, 1 )  // __kmpc_atomic_cmplx16_div
#if ( KMP_ARCH_X86 )
// 16-byte-aligned complex(16) variants
ATOMIC_CRITICAL( cmplx16, add_a16, kmp_cmplx128_a16_t, +, 32c, 1 )  // __kmpc_atomic_cmplx16_add_a16
ATOMIC_CRITICAL( cmplx16, sub_a16, kmp_cmplx128_a16_t, -, 32c, 1 )  // __kmpc_atomic_cmplx16_sub_a16
ATOMIC_CRITICAL( cmplx16, mul_a16, kmp_cmplx128_a16_t, *, 32c, 1 )  // __kmpc_atomic_cmplx16_mul_a16
ATOMIC_CRITICAL( cmplx16, div_a16, kmp_cmplx128_a16_t, /, 32c, 1 )  // __kmpc_atomic_cmplx16_div_a16
#endif
#endif

#if OMP_40_ENABLED

// OpenMP 4.0: x = expr binop x for non-commutative operations.
// Supported only on IA-32 architecture and Intel(R) 64
#if KMP_ARCH_X86 || KMP_ARCH_X86_64

// ------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
//     OP     - operator (it's supposed to contain an assignment)
//     LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
// "Reverse" means the stored value is the RIGHT operand: *lhs = rhs OP *lhs.
#define OP_CRITICAL_REV(OP,LCK_ID) \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    \
    (*lhs) = (rhs) OP (*lhs); \
    \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );

// ------------------------------------------------------------------------
// GOMP compatibility: when flagged and __kmp_atomic_mode == 2, do the whole
// reverse update under the global (index 0) atomic lock and return early.
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_REV(OP,FLAG) \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
        KMP_CHECK_GTID; \
        OP_CRITICAL_REV( OP, 0 ); \
        return; \
    }
#else
#define OP_GOMP_CRITICAL_REV(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */

// Beginning of a definition (provides name, parameters, debug trace)
//     TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed)
//     OP_ID   - operation identifier (add, sub, mul, ...)
//     TYPE    - operands' type
#define ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE, RET_TYPE) \
RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID##_rev( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \
{ \
    KMP_DEBUG_ASSERT( __kmp_init_serial ); \
    KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID "_rev: T#%d\n", gtid ));

// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
//     TYPE    - operands' type
//     BITS    - size in bits, used to distinguish low level calls
//     OP      - operator
// Note: temp_val introduced in order to force the compiler to read
//       *lhs only once (w/o it the compiler reads *lhs twice)
#define OP_CMPXCHG_REV(TYPE,BITS,OP) \
    { \
        TYPE KMP_ATOMIC_VOLATILE temp_val; \
        TYPE old_value, new_value; \
        temp_val = *lhs; \
        old_value = temp_val; \
        new_value = rhs OP old_value; \
        while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value, \
                      *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
        { \
            KMP_DO_PAUSE; \
            \
            temp_val = *lhs; \
            old_value = temp_val; \
            new_value = rhs OP old_value; \
        } \
    }

// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG_REV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,GOMP_FLAG) \
    ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \
    OP_CMPXCHG_REV(TYPE,BITS,OP) \
}

// ------------------------------------------------------------------------
// Entries definition for integer operands
//     TYPE_ID - operands type and size (fixed4, float4)
//     OP_ID   - operation identifier (add, sub, mul, ...)
//     TYPE    - operand type
//     BITS    - size in bits, used to distinguish low level calls
//     OP      - operator (used in critical section)
//     LCK_ID  - lock identifier, used to possibly distinguish lock variable
//               TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID,GOMP_FLAG
// ------------------------------------------------------------------------
// Routines for ATOMIC integer operands, other operators
// (only non-commutative operations need _rev variants)
// ------------------------------------------------------------------------
//                  TYPE_ID,OP_ID, TYPE, BITS, OP, LCK_ID, GOMP_FLAG
ATOMIC_CMPXCHG_REV( fixed1,  div, kmp_int8,    8, /,  1i, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_div_rev
ATOMIC_CMPXCHG_REV( fixed1u, div, kmp_uint8,   8, /,  1i, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1u_div_rev
ATOMIC_CMPXCHG_REV( fixed1,  shl, kmp_int8,    8, <<, 1i, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_shl_rev
ATOMIC_CMPXCHG_REV( fixed1,  shr, kmp_int8,    8, >>, 1i, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_shr_rev
ATOMIC_CMPXCHG_REV( fixed1u, shr, kmp_uint8,   8, >>, 1i, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1u_shr_rev
ATOMIC_CMPXCHG_REV( fixed1,  sub, kmp_int8,    8, -,  1i, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_sub_rev
ATOMIC_CMPXCHG_REV( fixed2,  div, kmp_int16,  16, /,  2i, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_div_rev
ATOMIC_CMPXCHG_REV( fixed2u, div, kmp_uint16, 16, /,  2i, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2u_div_rev
ATOMIC_CMPXCHG_REV( fixed2,  shl, kmp_int16,  16, <<, 2i, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_shl_rev
ATOMIC_CMPXCHG_REV( fixed2,  shr, kmp_int16,  16, >>, 2i, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_shr_rev
ATOMIC_CMPXCHG_REV( fixed2u, shr, kmp_uint16, 16, >>, 2i, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2u_shr_rev
ATOMIC_CMPXCHG_REV( fixed2,  sub, kmp_int16,  16, -,  2i, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_sub_rev
ATOMIC_CMPXCHG_REV( fixed4,  div, kmp_int32,  32, /,  4i, KMP_ARCH_X86 )  // __kmpc_atomic_fixed4_div_rev
ATOMIC_CMPXCHG_REV( fixed4u, div, kmp_uint32, 32, /,  4i, KMP_ARCH_X86 )  // __kmpc_atomic_fixed4u_div_rev
ATOMIC_CMPXCHG_REV( fixed4,  shl, kmp_int32,  32, <<, 4i, KMP_ARCH_X86 )  // __kmpc_atomic_fixed4_shl_rev
ATOMIC_CMPXCHG_REV( fixed4,  shr, kmp_int32,  32, >>, 4i, KMP_ARCH_X86 )  // __kmpc_atomic_fixed4_shr_rev
ATOMIC_CMPXCHG_REV( fixed4u, shr, kmp_uint32, 32, >>, 4i, KMP_ARCH_X86 )  // __kmpc_atomic_fixed4u_shr_rev
ATOMIC_CMPXCHG_REV( fixed4,  sub, kmp_int32,  32, -,  4i, KMP_ARCH_X86 )  // __kmpc_atomic_fixed4_sub_rev
ATOMIC_CMPXCHG_REV( fixed8,  div, kmp_int64,  64, /,  8i, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_div_rev
ATOMIC_CMPXCHG_REV( fixed8u, div, kmp_uint64, 64, /,  8i, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8u_div_rev
ATOMIC_CMPXCHG_REV( fixed8,  shl, kmp_int64,  64, <<, 8i, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_shl_rev
ATOMIC_CMPXCHG_REV( fixed8,  shr, kmp_int64,  64, >>, 8i, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_shr_rev
ATOMIC_CMPXCHG_REV( fixed8u, shr, kmp_uint64, 64, >>, 8i, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8u_shr_rev
ATOMIC_CMPXCHG_REV( fixed8,  sub, kmp_int64,  64, -,  8i, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_sub_rev
ATOMIC_CMPXCHG_REV( float4,  div, kmp_real32, 32, /,  4r, KMP_ARCH_X86 )  // __kmpc_atomic_float4_div_rev
ATOMIC_CMPXCHG_REV( float4,  sub, kmp_real32, 32, -,  4r, KMP_ARCH_X86 )  // __kmpc_atomic_float4_sub_rev
ATOMIC_CMPXCHG_REV( float8,  div, kmp_real64, 64, /,  8r, KMP_ARCH_X86 )  // __kmpc_atomic_float8_div_rev
ATOMIC_CMPXCHG_REV( float8,  sub, kmp_real64, 64, -,  8r, KMP_ARCH_X86 )  // __kmpc_atomic_float8_sub_rev
//                  TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID, GOMP_FLAG

// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
//     TYPE_ID, OP_ID, TYPE - detailed above
//     OP - operator
//     LCK_ID - lock identifier, used to possibly distinguish lock variable
#define ATOMIC_CRITICAL_REV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
    ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \
    OP_CRITICAL_REV(OP,LCK_ID) \
}

/* ------------------------------------------------------------------------- */
// routines for long double type
ATOMIC_CRITICAL_REV( float10, sub, long double, -, 10r, 1 )  // __kmpc_atomic_float10_sub_rev
ATOMIC_CRITICAL_REV( float10, div, long double, /, 10r, 1 )  // __kmpc_atomic_float10_div_rev
#if KMP_HAVE_QUAD
// routines for _Quad type
ATOMIC_CRITICAL_REV( float16, sub, QUAD_LEGACY, -, 16r, 1 )  // __kmpc_atomic_float16_sub_rev
ATOMIC_CRITICAL_REV( float16, div, QUAD_LEGACY, /, 16r, 1 )  // __kmpc_atomic_float16_div_rev
#if ( KMP_ARCH_X86 )
// 16-byte-aligned _Quad variants
ATOMIC_CRITICAL_REV( float16, sub_a16, Quad_a16_t, -, 16r, 1 )  // __kmpc_atomic_float16_sub_a16_rev
ATOMIC_CRITICAL_REV( float16, div_a16, Quad_a16_t, /, 16r, 1 )  // __kmpc_atomic_float16_div_a16_rev
#endif
#endif
// routines for complex types
ATOMIC_CRITICAL_REV( cmplx4,  sub, kmp_cmplx32, -, 8c,  1 )  // __kmpc_atomic_cmplx4_sub_rev
ATOMIC_CRITICAL_REV( cmplx4,  div, kmp_cmplx32, /, 8c,  1 )  // __kmpc_atomic_cmplx4_div_rev
ATOMIC_CRITICAL_REV( cmplx8,  sub, kmp_cmplx64, -, 16c, 1 )  // __kmpc_atomic_cmplx8_sub_rev
ATOMIC_CRITICAL_REV( cmplx8,  div, kmp_cmplx64, /, 16c, 1 )  // __kmpc_atomic_cmplx8_div_rev
ATOMIC_CRITICAL_REV( cmplx10, sub, kmp_cmplx80, -, 20c, 1 )  // __kmpc_atomic_cmplx10_sub_rev
ATOMIC_CRITICAL_REV( cmplx10, div, kmp_cmplx80, /, 20c, 1 )  // __kmpc_atomic_cmplx10_div_rev
__kmpc_atomic_cmplx10_div_rev #if KMP_HAVE_QUAD ATOMIC_CRITICAL_REV( cmplx16, sub, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_rev ATOMIC_CRITICAL_REV( cmplx16, div, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_rev #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_REV( cmplx16, sub_a16, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_rev ATOMIC_CRITICAL_REV( cmplx16, div_a16, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_rev #endif #endif #endif //KMP_ARCH_X86 || KMP_ARCH_X86_64 // End of OpenMP 4.0: x = expr binop x for non-commutative operations. #endif //OMP_40_ENABLED /* ------------------------------------------------------------------------ */ /* Routines for mixed types of LHS and RHS, when RHS is "larger" */ /* Note: in order to reduce the total number of types combinations */ /* it is supposed that compiler converts RHS to longest floating type,*/ /* that is _Quad, before call to any of these routines */ /* Conversion to _Quad will be done by the compiler during calculation, */ /* conversion back to TYPE - before the assignment, like: */ /* *lhs = (TYPE)( (_Quad)(*lhs) OP rhs ) */ /* Performance penalty expected because of SW emulation use */ /* ------------------------------------------------------------------------ */ #define ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ void __kmpc_atomic_##TYPE_ID##_##OP_ID##_##RTYPE_ID( ident_t *id_ref, int gtid, TYPE * lhs, RTYPE rhs ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID "_" #RTYPE_ID ": T#%d\n", gtid )); // ------------------------------------------------------------------------- #define ATOMIC_CRITICAL_FP(TYPE_ID,TYPE,OP_ID,OP,RTYPE_ID,RTYPE,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL(OP##=,LCK_ID) /* send assignment */ \ } // ------------------------------------------------------------------------- #if 
KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------- // X86 or X86_64: no alignment problems ==================================== #define ATOMIC_CMPXCHG_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ OP_CMPXCHG(TYPE,BITS,OP) \ } // ------------------------------------------------------------------------- #else // ------------------------------------------------------------------------ // Code for other architectures that don't handle unaligned accesses. #define ATOMIC_CMPXCHG_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \ } \ } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ // ------------------------------------------------------------------------- #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG_REV_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \ OP_CMPXCHG_REV(TYPE,BITS,OP) \ } #define ATOMIC_CRITICAL_REV_FP(TYPE_ID,TYPE,OP_ID,OP,RTYPE_ID,RTYPE,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \ OP_CRITICAL_REV(OP,LCK_ID) \ } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ // RHS=float8 ATOMIC_CMPXCHG_MIX( fixed1, char, mul, 8, *, float8, kmp_real64, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_float8 ATOMIC_CMPXCHG_MIX( fixed1, char, div, 8, /, float8, kmp_real64, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_float8 ATOMIC_CMPXCHG_MIX( 
fixed2, short, mul, 16, *, float8, kmp_real64, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_float8 ATOMIC_CMPXCHG_MIX( fixed2, short, div, 16, /, float8, kmp_real64, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_float8 ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, mul, 32, *, float8, kmp_real64, 4i, 3, 0 ) // __kmpc_atomic_fixed4_mul_float8 ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, div, 32, /, float8, kmp_real64, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_float8 ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, mul, 64, *, float8, kmp_real64, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_float8 ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, div, 64, /, float8, kmp_real64, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_float8 ATOMIC_CMPXCHG_MIX( float4, kmp_real32, add, 32, +, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_float8 ATOMIC_CMPXCHG_MIX( float4, kmp_real32, sub, 32, -, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_float8 ATOMIC_CMPXCHG_MIX( float4, kmp_real32, mul, 32, *, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_float8 ATOMIC_CMPXCHG_MIX( float4, kmp_real32, div, 32, /, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_float8 // RHS=float16 (deprecated, to be removed when we are sure the compiler does not use them) #if KMP_HAVE_QUAD ATOMIC_CMPXCHG_MIX( fixed1, char, add, 8, +, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add_fp ATOMIC_CMPXCHG_MIX( fixed1u, uchar, add, 8, +, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_add_fp ATOMIC_CMPXCHG_MIX( fixed1, char, sub, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_fp ATOMIC_CMPXCHG_MIX( fixed1u, uchar, sub, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_sub_fp ATOMIC_CMPXCHG_MIX( fixed1, char, mul, 8, *, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_fp ATOMIC_CMPXCHG_MIX( fixed1u, uchar, mul, 8, *, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // 
__kmpc_atomic_fixed1u_mul_fp ATOMIC_CMPXCHG_MIX( fixed1, char, div, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_fp ATOMIC_CMPXCHG_MIX( fixed1u, uchar, div, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_fp ATOMIC_CMPXCHG_MIX( fixed2, short, add, 16, +, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add_fp ATOMIC_CMPXCHG_MIX( fixed2u, ushort, add, 16, +, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_add_fp ATOMIC_CMPXCHG_MIX( fixed2, short, sub, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_fp ATOMIC_CMPXCHG_MIX( fixed2u, ushort, sub, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_sub_fp ATOMIC_CMPXCHG_MIX( fixed2, short, mul, 16, *, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_fp ATOMIC_CMPXCHG_MIX( fixed2u, ushort, mul, 16, *, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_mul_fp ATOMIC_CMPXCHG_MIX( fixed2, short, div, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_fp ATOMIC_CMPXCHG_MIX( fixed2u, ushort, div, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_fp ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, add, 32, +, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_add_fp ATOMIC_CMPXCHG_MIX( fixed4u, kmp_uint32, add, 32, +, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_add_fp ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, sub, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub_fp ATOMIC_CMPXCHG_MIX( fixed4u, kmp_uint32, sub, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_sub_fp ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, mul, 32, *, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_mul_fp ATOMIC_CMPXCHG_MIX( fixed4u, kmp_uint32, mul, 32, *, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_mul_fp ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, div, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_fp ATOMIC_CMPXCHG_MIX( fixed4u, kmp_uint32, div, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_div_fp 
ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, add, 64, +, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add_fp ATOMIC_CMPXCHG_MIX( fixed8u, kmp_uint64, add, 64, +, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_add_fp ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, sub, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_fp ATOMIC_CMPXCHG_MIX( fixed8u, kmp_uint64, sub, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_sub_fp ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, mul, 64, *, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_fp ATOMIC_CMPXCHG_MIX( fixed8u, kmp_uint64, mul, 64, *, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_mul_fp ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, div, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_fp ATOMIC_CMPXCHG_MIX( fixed8u, kmp_uint64, div, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_fp ATOMIC_CMPXCHG_MIX( float4, kmp_real32, add, 32, +, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_fp ATOMIC_CMPXCHG_MIX( float4, kmp_real32, sub, 32, -, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_fp ATOMIC_CMPXCHG_MIX( float4, kmp_real32, mul, 32, *, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_fp ATOMIC_CMPXCHG_MIX( float4, kmp_real32, div, 32, /, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_fp ATOMIC_CMPXCHG_MIX( float8, kmp_real64, add, 64, +, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add_fp ATOMIC_CMPXCHG_MIX( float8, kmp_real64, sub, 64, -, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_fp ATOMIC_CMPXCHG_MIX( float8, kmp_real64, mul, 64, *, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul_fp ATOMIC_CMPXCHG_MIX( float8, kmp_real64, div, 64, /, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_fp ATOMIC_CRITICAL_FP( float10, long double, add, +, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_add_fp ATOMIC_CRITICAL_FP( float10, long 
double, sub, -, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_sub_fp ATOMIC_CRITICAL_FP( float10, long double, mul, *, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_mul_fp ATOMIC_CRITICAL_FP( float10, long double, div, /, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_div_fp #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // Reverse operations ATOMIC_CMPXCHG_REV_MIX( fixed1, char, sub_rev, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed1u, uchar, sub_rev, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_sub_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed1, char, div_rev, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed1u, uchar, div_rev, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed2, short, sub_rev, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed2u, ushort, sub_rev, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_sub_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed2, short, div_rev, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed2u, ushort, div_rev, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed4, kmp_int32, sub_rev, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed4u, kmp_uint32, sub_rev, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_sub_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed4, kmp_int32, div_rev, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed4u, kmp_uint32, div_rev, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_div_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed8, kmp_int64, sub_rev, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed8u, kmp_uint64, sub_rev, 64, -, fp, _Quad, 8i, 7, 
KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_sub_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed8, kmp_int64, div_rev, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed8u, kmp_uint64, div_rev, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_rev_fp ATOMIC_CMPXCHG_REV_MIX( float4, kmp_real32, sub_rev, 32, -, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_rev_fp ATOMIC_CMPXCHG_REV_MIX( float4, kmp_real32, div_rev, 32, /, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_rev_fp ATOMIC_CMPXCHG_REV_MIX( float8, kmp_real64, sub_rev, 64, -, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_rev_fp ATOMIC_CMPXCHG_REV_MIX( float8, kmp_real64, div_rev, 64, /, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_rev_fp ATOMIC_CRITICAL_REV_FP( float10, long double, sub_rev, -, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_sub_rev_fp ATOMIC_CRITICAL_REV_FP( float10, long double, div_rev, /, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_div_rev_fp #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ #endif #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------ // X86 or X86_64: no alignment problems ==================================== #if USE_CMPXCHG_FIX // workaround for C78287 (complex(kind=4) data type) #define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \ } // end of the second part of the workaround for C78287 #else #define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ OP_CMPXCHG(TYPE,BITS,OP) \ } #endif // USE_CMPXCHG_FIX #else // ------------------------------------------------------------------------ // Code for other 
architectures that don't handle unaligned accesses. #define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \ } \ } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, add, 64, +, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_add_cmplx8 ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, sub, 64, -, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_sub_cmplx8 ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, mul, 64, *, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_mul_cmplx8 ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, div, 64, /, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_div_cmplx8 // READ, WRITE, CAPTURE are supported only on IA-32 architecture and Intel(R) 64 #if KMP_ARCH_X86 || KMP_ARCH_X86_64 ////////////////////////////////////////////////////////////////////////////////////////////////////// // ------------------------------------------------------------------------ // Atomic READ routines // ------------------------------------------------------------------------ // ------------------------------------------------------------------------ // Beginning of a definition (provides name, parameters, gebug trace) // TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed) // OP_ID - operation identifier (add, sub, mul, ...) 
//     TYPE    - operands' type
#define ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE, RET_TYPE)                   \
RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * loc ) \
{                                                                         \
    KMP_DEBUG_ASSERT( __kmp_init_serial );                                \
    KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));

// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store_ret" routine
//     TYPE    - operands' type
//     BITS    - size in bits, used to distinguish low level calls
//     OP      - operator
// Note: temp_val introduced in order to force the compiler to read
//       *lhs only once (w/o it the compiler reads *lhs twice)
// TODO: check if it is still necessary
// Return old value regardless of the result of "compare & swap" operation
#define OP_CMPXCHG_READ(TYPE,BITS,OP)                                     \
    {                                                                     \
        TYPE KMP_ATOMIC_VOLATILE temp_val;                                \
        union f_i_union {                                                 \
            TYPE f_val;                                                   \
            kmp_int##BITS i_val;                                          \
        };                                                                \
        union f_i_union old_value;                                        \
        temp_val = *loc;                                                  \
        old_value.f_val = temp_val;                                       \
        old_value.i_val = KMP_COMPARE_AND_STORE_RET##BITS( (kmp_int##BITS *) loc, \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value.i_val,   \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value.i_val ); \
        new_value = old_value.f_val;                                      \
        return new_value;                                                 \
    }

// -------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
//     OP     - operator (it's supposed to contain an assignment)
//     LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
#define OP_CRITICAL_READ(OP,LCK_ID)                                       \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );             \
                                                                          \
    new_value = (*loc);                                                   \
                                                                          \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );

// -------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_READ(OP,FLAG)                                    \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) {                           \
        KMP_CHECK_GTID;                                                   \
        OP_CRITICAL_READ( OP, 0 );                                        \
        return new_value;                                                 \
    }
#else
#define OP_GOMP_CRITICAL_READ(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */

// -------------------------------------------------------------------------
#define ATOMIC_FIXED_READ(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG)           \
ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE)                                \
    TYPE new_value;                                                       \
    OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG)                                \
    new_value = KMP_TEST_THEN_ADD##BITS( loc, OP 0 );                     \
    return new_value;                                                     \
}
// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG_READ(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG)         \
ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE)                                \
    TYPE new_value;                                                       \
    OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG)                                \
    OP_CMPXCHG_READ(TYPE,BITS,OP)                                         \
}
// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
//     TYPE_ID, OP_ID, TYPE - detailed above
//     OP      - operator
//     LCK_ID  - lock identifier, used to possibly distinguish lock variable
#define ATOMIC_CRITICAL_READ(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG)      \
ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE)                                \
    TYPE new_value;                                                       \
    OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG) /* send assignment */          \
    OP_CRITICAL_READ(OP,LCK_ID)            /* send assignment */          \
    return new_value;                                                     \
}

// ------------------------------------------------------------------------
// Fix for cmplx4 read (CQ220361) on Windows* OS. Regular routine with return value doesn't work.
// Let's return the read value through the additional parameter.
#if ( KMP_OS_WINDOWS ) #define OP_CRITICAL_READ_WRK(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ (*out) = (*loc); \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define OP_GOMP_CRITICAL_READ_WRK(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_READ_WRK( OP, 0 ); \ } #else #define OP_GOMP_CRITICAL_READ_WRK(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ #define ATOMIC_BEGIN_READ_WRK(TYPE_ID,OP_ID,TYPE) \ void __kmpc_atomic_##TYPE_ID##_##OP_ID( TYPE * out, ident_t *id_ref, int gtid, TYPE * loc ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid )); // ------------------------------------------------------------------------ #define ATOMIC_CRITICAL_READ_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_READ_WRK(TYPE_ID,OP_ID,TYPE) \ OP_GOMP_CRITICAL_READ_WRK(OP##=,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL_READ_WRK(OP,LCK_ID) /* send assignment */ \ } #endif // KMP_OS_WINDOWS // ------------------------------------------------------------------------ // TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG ATOMIC_FIXED_READ( fixed4, rd, kmp_int32, 32, +, 0 ) // __kmpc_atomic_fixed4_rd ATOMIC_FIXED_READ( fixed8, rd, kmp_int64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_rd ATOMIC_CMPXCHG_READ( float4, rd, kmp_real32, 32, +, KMP_ARCH_X86 ) // __kmpc_atomic_float4_rd ATOMIC_CMPXCHG_READ( float8, rd, kmp_real64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_float8_rd // !!! 
TODO: Remove lock operations for "char" since it can't be non-atomic ATOMIC_CMPXCHG_READ( fixed1, rd, kmp_int8, 8, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_rd ATOMIC_CMPXCHG_READ( fixed2, rd, kmp_int16, 16, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_rd ATOMIC_CRITICAL_READ( float10, rd, long double, +, 10r, 1 ) // __kmpc_atomic_float10_rd #if KMP_HAVE_QUAD ATOMIC_CRITICAL_READ( float16, rd, QUAD_LEGACY, +, 16r, 1 ) // __kmpc_atomic_float16_rd #endif // KMP_HAVE_QUAD // Fix for CQ220361 on Windows* OS #if ( KMP_OS_WINDOWS ) ATOMIC_CRITICAL_READ_WRK( cmplx4, rd, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_rd #else ATOMIC_CRITICAL_READ( cmplx4, rd, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_rd #endif ATOMIC_CRITICAL_READ( cmplx8, rd, kmp_cmplx64, +, 16c, 1 ) // __kmpc_atomic_cmplx8_rd ATOMIC_CRITICAL_READ( cmplx10, rd, kmp_cmplx80, +, 20c, 1 ) // __kmpc_atomic_cmplx10_rd #if KMP_HAVE_QUAD ATOMIC_CRITICAL_READ( cmplx16, rd, CPLX128_LEG, +, 32c, 1 ) // __kmpc_atomic_cmplx16_rd #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_READ( float16, a16_rd, Quad_a16_t, +, 16r, 1 ) // __kmpc_atomic_float16_a16_rd ATOMIC_CRITICAL_READ( cmplx16, a16_rd, kmp_cmplx128_a16_t, +, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_rd #endif #endif // ------------------------------------------------------------------------ // Atomic WRITE routines // ------------------------------------------------------------------------ #define ATOMIC_XCHG_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP,GOMP_FLAG) \ KMP_XCHG_FIXED##BITS( lhs, rhs ); \ } // ------------------------------------------------------------------------ #define ATOMIC_XCHG_FLOAT_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP,GOMP_FLAG) \ KMP_XCHG_REAL##BITS( lhs, rhs ); \ } // ------------------------------------------------------------------------ // Operation on *lhs, rhs using "compare_and_store" routine // TYPE - operands' type // 
BITS - size in bits, used to distinguish low level calls // OP - operator // Note: temp_val introduced in order to force the compiler to read // *lhs only once (w/o it the compiler reads *lhs twice) #define OP_CMPXCHG_WR(TYPE,BITS,OP) \ { \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ TYPE old_value, new_value; \ temp_val = *lhs; \ old_value = temp_val; \ new_value = rhs; \ while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \ { \ KMP_CPU_PAUSE(); \ \ temp_val = *lhs; \ old_value = temp_val; \ new_value = rhs; \ } \ } // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP,GOMP_FLAG) \ OP_CMPXCHG_WR(TYPE,BITS,OP) \ } // ------------------------------------------------------------------------ // Routines for Extended types: long double, _Quad, complex flavours (use critical section) // TYPE_ID, OP_ID, TYPE - detailed above // OP - operator // LCK_ID - lock identifier, used to possibly distinguish lock variable #define ATOMIC_CRITICAL_WR(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL(OP,LCK_ID) /* send assignment */ \ } // ------------------------------------------------------------------------- ATOMIC_XCHG_WR( fixed1, wr, kmp_int8, 8, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_wr ATOMIC_XCHG_WR( fixed2, wr, kmp_int16, 16, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_wr ATOMIC_XCHG_WR( fixed4, wr, kmp_int32, 32, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_wr #if ( KMP_ARCH_X86 ) ATOMIC_CMPXCHG_WR( fixed8, wr, kmp_int64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_wr #else ATOMIC_XCHG_WR( fixed8, wr, kmp_int64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_wr #endif ATOMIC_XCHG_FLOAT_WR( float4, wr, kmp_real32, 32, =, 
KMP_ARCH_X86 ) // __kmpc_atomic_float4_wr #if ( KMP_ARCH_X86 ) ATOMIC_CMPXCHG_WR( float8, wr, kmp_real64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_float8_wr #else ATOMIC_XCHG_FLOAT_WR( float8, wr, kmp_real64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_float8_wr #endif ATOMIC_CRITICAL_WR( float10, wr, long double, =, 10r, 1 ) // __kmpc_atomic_float10_wr #if KMP_HAVE_QUAD ATOMIC_CRITICAL_WR( float16, wr, QUAD_LEGACY, =, 16r, 1 ) // __kmpc_atomic_float16_wr #endif ATOMIC_CRITICAL_WR( cmplx4, wr, kmp_cmplx32, =, 8c, 1 ) // __kmpc_atomic_cmplx4_wr ATOMIC_CRITICAL_WR( cmplx8, wr, kmp_cmplx64, =, 16c, 1 ) // __kmpc_atomic_cmplx8_wr ATOMIC_CRITICAL_WR( cmplx10, wr, kmp_cmplx80, =, 20c, 1 ) // __kmpc_atomic_cmplx10_wr #if KMP_HAVE_QUAD ATOMIC_CRITICAL_WR( cmplx16, wr, CPLX128_LEG, =, 32c, 1 ) // __kmpc_atomic_cmplx16_wr #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_WR( float16, a16_wr, Quad_a16_t, =, 16r, 1 ) // __kmpc_atomic_float16_a16_wr ATOMIC_CRITICAL_WR( cmplx16, a16_wr, kmp_cmplx128_a16_t, =, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_wr #endif #endif // ------------------------------------------------------------------------ // Atomic CAPTURE routines // ------------------------------------------------------------------------ // Beginning of a definition (provides name, parameters, gebug trace) // TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed) // OP_ID - operation identifier (add, sub, mul, ...) 
// TYPE - operands' type #define ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,RET_TYPE) \ RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, int flag ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid )); // ------------------------------------------------------------------------- // Operation on *lhs, rhs bound by critical section // OP - operator (it's supposed to contain an assignment) // LCK_ID - lock identifier // Note: don't check gtid as it should always be valid // 1, 2-byte - expect valid parameter, other - check before this macro #define OP_CRITICAL_CPT(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ if( flag ) { \ (*lhs) OP rhs; \ new_value = (*lhs); \ } else { \ new_value = (*lhs); \ (*lhs) OP rhs; \ } \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ return new_value; // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define OP_GOMP_CRITICAL_CPT(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_CPT( OP##=, 0 ); \ } #else #define OP_GOMP_CRITICAL_CPT(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ // Operation on *lhs, rhs using "compare_and_store" routine // TYPE - operands' type // BITS - size in bits, used to distinguish low level calls // OP - operator // Note: temp_val introduced in order to force the compiler to read // *lhs only once (w/o it the compiler reads *lhs twice) #define OP_CMPXCHG_CPT(TYPE,BITS,OP) \ { \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ TYPE old_value, new_value; \ temp_val = *lhs; \ old_value = temp_val; \ new_value = old_value OP rhs; \ while ( ! 
KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \ { \ KMP_CPU_PAUSE(); \ \ temp_val = *lhs; \ old_value = temp_val; \ new_value = old_value OP rhs; \ } \ if( flag ) { \ return new_value; \ } else \ return old_value; \ } // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value; \ OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) \ OP_CMPXCHG_CPT(TYPE,BITS,OP) \ } // ------------------------------------------------------------------------- #define ATOMIC_FIXED_ADD_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE old_value, new_value; \ OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) \ /* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \ old_value = KMP_TEST_THEN_ADD##BITS( lhs, OP rhs ); \ if( flag ) { \ return old_value OP rhs; \ } else \ return old_value; \ } // ------------------------------------------------------------------------- ATOMIC_FIXED_ADD_CPT( fixed4, add_cpt, kmp_int32, 32, +, 0 ) // __kmpc_atomic_fixed4_add_cpt ATOMIC_FIXED_ADD_CPT( fixed4, sub_cpt, kmp_int32, 32, -, 0 ) // __kmpc_atomic_fixed4_sub_cpt ATOMIC_FIXED_ADD_CPT( fixed8, add_cpt, kmp_int64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add_cpt ATOMIC_FIXED_ADD_CPT( fixed8, sub_cpt, kmp_int64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_cpt ATOMIC_CMPXCHG_CPT( float4, add_cpt, kmp_real32, 32, +, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_cpt ATOMIC_CMPXCHG_CPT( float4, sub_cpt, kmp_real32, 32, -, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_cpt ATOMIC_CMPXCHG_CPT( float8, add_cpt, kmp_real64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add_cpt ATOMIC_CMPXCHG_CPT( float8, sub_cpt, kmp_real64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_cpt // 
------------------------------------------------------------------------ // Entries definition for integer operands // TYPE_ID - operands type and size (fixed4, float4) // OP_ID - operation identifier (add, sub, mul, ...) // TYPE - operand type // BITS - size in bits, used to distinguish low level calls // OP - operator (used in critical section) // TYPE_ID,OP_ID, TYPE, BITS,OP,GOMP_FLAG // ------------------------------------------------------------------------ // Routines for ATOMIC integer operands, other operators // ------------------------------------------------------------------------ // TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG ATOMIC_CMPXCHG_CPT( fixed1, add_cpt, kmp_int8, 8, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add_cpt ATOMIC_CMPXCHG_CPT( fixed1, andb_cpt, kmp_int8, 8, &, 0 ) // __kmpc_atomic_fixed1_andb_cpt ATOMIC_CMPXCHG_CPT( fixed1, div_cpt, kmp_int8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_cpt ATOMIC_CMPXCHG_CPT( fixed1u, div_cpt, kmp_uint8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_cpt ATOMIC_CMPXCHG_CPT( fixed1, mul_cpt, kmp_int8, 8, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_cpt ATOMIC_CMPXCHG_CPT( fixed1, orb_cpt, kmp_int8, 8, |, 0 ) // __kmpc_atomic_fixed1_orb_cpt ATOMIC_CMPXCHG_CPT( fixed1, shl_cpt, kmp_int8, 8, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl_cpt ATOMIC_CMPXCHG_CPT( fixed1, shr_cpt, kmp_int8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr_cpt ATOMIC_CMPXCHG_CPT( fixed1u, shr_cpt, kmp_uint8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr_cpt ATOMIC_CMPXCHG_CPT( fixed1, sub_cpt, kmp_int8, 8, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_cpt ATOMIC_CMPXCHG_CPT( fixed1, xor_cpt, kmp_int8, 8, ^, 0 ) // __kmpc_atomic_fixed1_xor_cpt ATOMIC_CMPXCHG_CPT( fixed2, add_cpt, kmp_int16, 16, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add_cpt ATOMIC_CMPXCHG_CPT( fixed2, andb_cpt, kmp_int16, 16, &, 0 ) // __kmpc_atomic_fixed2_andb_cpt ATOMIC_CMPXCHG_CPT( fixed2, div_cpt, kmp_int16, 16, /, KMP_ARCH_X86 ) // 
__kmpc_atomic_fixed2_div_cpt ATOMIC_CMPXCHG_CPT( fixed2u, div_cpt, kmp_uint16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_cpt ATOMIC_CMPXCHG_CPT( fixed2, mul_cpt, kmp_int16, 16, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_cpt ATOMIC_CMPXCHG_CPT( fixed2, orb_cpt, kmp_int16, 16, |, 0 ) // __kmpc_atomic_fixed2_orb_cpt ATOMIC_CMPXCHG_CPT( fixed2, shl_cpt, kmp_int16, 16, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl_cpt ATOMIC_CMPXCHG_CPT( fixed2, shr_cpt, kmp_int16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr_cpt ATOMIC_CMPXCHG_CPT( fixed2u, shr_cpt, kmp_uint16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr_cpt ATOMIC_CMPXCHG_CPT( fixed2, sub_cpt, kmp_int16, 16, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_cpt ATOMIC_CMPXCHG_CPT( fixed2, xor_cpt, kmp_int16, 16, ^, 0 ) // __kmpc_atomic_fixed2_xor_cpt ATOMIC_CMPXCHG_CPT( fixed4, andb_cpt, kmp_int32, 32, &, 0 ) // __kmpc_atomic_fixed4_andb_cpt ATOMIC_CMPXCHG_CPT( fixed4, div_cpt, kmp_int32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div_cpt ATOMIC_CMPXCHG_CPT( fixed4u, div_cpt, kmp_uint32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div_cpt ATOMIC_CMPXCHG_CPT( fixed4, mul_cpt, kmp_int32, 32, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_mul_cpt ATOMIC_CMPXCHG_CPT( fixed4, orb_cpt, kmp_int32, 32, |, 0 ) // __kmpc_atomic_fixed4_orb_cpt ATOMIC_CMPXCHG_CPT( fixed4, shl_cpt, kmp_int32, 32, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl_cpt ATOMIC_CMPXCHG_CPT( fixed4, shr_cpt, kmp_int32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr_cpt ATOMIC_CMPXCHG_CPT( fixed4u, shr_cpt, kmp_uint32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr_cpt ATOMIC_CMPXCHG_CPT( fixed4, xor_cpt, kmp_int32, 32, ^, 0 ) // __kmpc_atomic_fixed4_xor_cpt ATOMIC_CMPXCHG_CPT( fixed8, andb_cpt, kmp_int64, 64, &, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andb_cpt ATOMIC_CMPXCHG_CPT( fixed8, div_cpt, kmp_int64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_cpt ATOMIC_CMPXCHG_CPT( fixed8u, div_cpt, kmp_uint64, 64, /, 
KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_cpt ATOMIC_CMPXCHG_CPT( fixed8, mul_cpt, kmp_int64, 64, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_cpt ATOMIC_CMPXCHG_CPT( fixed8, orb_cpt, kmp_int64, 64, |, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orb_cpt ATOMIC_CMPXCHG_CPT( fixed8, shl_cpt, kmp_int64, 64, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl_cpt ATOMIC_CMPXCHG_CPT( fixed8, shr_cpt, kmp_int64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr_cpt ATOMIC_CMPXCHG_CPT( fixed8u, shr_cpt, kmp_uint64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr_cpt ATOMIC_CMPXCHG_CPT( fixed8, xor_cpt, kmp_int64, 64, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_xor_cpt ATOMIC_CMPXCHG_CPT( float4, div_cpt, kmp_real32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_cpt ATOMIC_CMPXCHG_CPT( float4, mul_cpt, kmp_real32, 32, *, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_cpt ATOMIC_CMPXCHG_CPT( float8, div_cpt, kmp_real64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_cpt ATOMIC_CMPXCHG_CPT( float8, mul_cpt, kmp_real64, 64, *, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul_cpt // TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG ////////////////////////////////// // CAPTURE routines for mixed types RHS=float16 #if KMP_HAVE_QUAD // Beginning of a definition (provides name, parameters, gebug trace) // TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed) // OP_ID - operation identifier (add, sub, mul, ...) 
// TYPE - operands' type #define ATOMIC_BEGIN_CPT_MIX(TYPE_ID,OP_ID,TYPE,RTYPE_ID,RTYPE) \ TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID##_##RTYPE_ID( ident_t *id_ref, int gtid, TYPE * lhs, RTYPE rhs, int flag ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID "_" #RTYPE_ID ": T#%d\n", gtid )); // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG_CPT_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_CPT_MIX(TYPE_ID,OP_ID,TYPE,RTYPE_ID,RTYPE) \ TYPE new_value; \ OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) \ OP_CMPXCHG_CPT(TYPE,BITS,OP) \ } // ------------------------------------------------------------------------- #define ATOMIC_CRITICAL_CPT_MIX(TYPE_ID,TYPE,OP_ID,OP,RTYPE_ID,RTYPE,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_CPT_MIX(TYPE_ID,OP_ID,TYPE,RTYPE_ID,RTYPE) \ TYPE new_value; \ OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL_CPT(OP##=,LCK_ID) /* send assignment */ \ } ATOMIC_CMPXCHG_CPT_MIX( fixed1, char, add_cpt, 8, +, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed1u, uchar, add_cpt, 8, +, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_add_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed1, char, sub_cpt, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed1u, uchar, sub_cpt, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_sub_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed1, char, mul_cpt, 8, *, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed1u, uchar, mul_cpt, 8, *, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_mul_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed1, char, div_cpt, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed1u, uchar, div_cpt, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_cpt_fp 
// Mixed-type capture generators, _Quad RHS: 2/4/8-byte integers and
// 4/8-byte reals. Note the fixed4 group passes GOMP_FLAG 0 (no GOMP
// critical-section fallback needed on any arch for 32-bit CAS).
ATOMIC_CMPXCHG_CPT_MIX( fixed2,  short,  add_cpt, 16, +, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed2u, ushort, add_cpt, 16, +, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_add_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed2,  short,  sub_cpt, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed2u, ushort, sub_cpt, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_sub_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed2,  short,  mul_cpt, 16, *, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed2u, ushort, mul_cpt, 16, *, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_mul_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed2,  short,  div_cpt, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed2u, ushort, div_cpt, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_cpt_fp

ATOMIC_CMPXCHG_CPT_MIX( fixed4,  kmp_int32,  add_cpt, 32, +, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_add_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed4u, kmp_uint32, add_cpt, 32, +, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_add_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed4,  kmp_int32,  sub_cpt, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed4u, kmp_uint32, sub_cpt, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_sub_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed4,  kmp_int32,  mul_cpt, 32, *, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_mul_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed4u, kmp_uint32, mul_cpt, 32, *, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_mul_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed4,  kmp_int32,  div_cpt, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed4u, kmp_uint32, div_cpt, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_div_cpt_fp

ATOMIC_CMPXCHG_CPT_MIX( fixed8,  kmp_int64,  add_cpt, 64, +, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed8u, kmp_uint64, add_cpt, 64, +, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_add_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed8,  kmp_int64,  sub_cpt, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed8u, kmp_uint64, sub_cpt, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_sub_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed8,  kmp_int64,  mul_cpt, 64, *, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed8u, kmp_uint64, mul_cpt, 64, *, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_mul_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed8,  kmp_int64,  div_cpt, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed8u, kmp_uint64, div_cpt, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_cpt_fp

ATOMIC_CMPXCHG_CPT_MIX( float4, kmp_real32, add_cpt, 32, +, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( float4, kmp_real32, sub_cpt, 32, -, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( float4, kmp_real32, mul_cpt, 32, *, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( float4, kmp_real32, div_cpt, 32, /, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_cpt_fp

ATOMIC_CMPXCHG_CPT_MIX( float8, kmp_real64, add_cpt, 64, +, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( float8, kmp_real64, sub_cpt, 64, -, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( float8, kmp_real64, mul_cpt, 64, *, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( float8, kmp_real64, div_cpt, 64, /, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_cpt_fp
ATOMIC_CRITICAL_CPT_MIX( float10, long double, add_cpt, +, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_add_cpt_fp ATOMIC_CRITICAL_CPT_MIX( float10, long double, sub_cpt, -, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_sub_cpt_fp ATOMIC_CRITICAL_CPT_MIX( float10, long double, mul_cpt, *, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_mul_cpt_fp ATOMIC_CRITICAL_CPT_MIX( float10, long double, div_cpt, /, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_div_cpt_fp #endif //KMP_HAVE_QUAD /////////////////////////////////// // ------------------------------------------------------------------------ // Routines for C/C++ Reduction operators && and || // ------------------------------------------------------------------------ // ------------------------------------------------------------------------- // Operation on *lhs, rhs bound by critical section // OP - operator (it's supposed to contain an assignment) // LCK_ID - lock identifier // Note: don't check gtid as it should always be valid // 1, 2-byte - expect valid parameter, other - check before this macro #define OP_CRITICAL_L_CPT(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ if( flag ) { \ new_value OP rhs; \ } else \ new_value = (*lhs); \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define OP_GOMP_CRITICAL_L_CPT(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_L_CPT( OP, 0 ); \ return new_value; \ } #else #define OP_GOMP_CRITICAL_L_CPT(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ // Need separate macros for &&, || because there is no combined assignment #define ATOMIC_CMPX_L_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value; \ OP_GOMP_CRITICAL_L_CPT( = *lhs OP, GOMP_FLAG ) \ OP_CMPXCHG_CPT(TYPE,BITS,OP) \ } 
// Logical &&/|| capture entry points.
ATOMIC_CMPX_L_CPT( fixed1, andl_cpt, char,      8,  &&, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_andl_cpt
ATOMIC_CMPX_L_CPT( fixed1, orl_cpt,  char,      8,  ||, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_orl_cpt
ATOMIC_CMPX_L_CPT( fixed2, andl_cpt, short,     16, &&, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_andl_cpt
ATOMIC_CMPX_L_CPT( fixed2, orl_cpt,  short,     16, ||, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_orl_cpt
ATOMIC_CMPX_L_CPT( fixed4, andl_cpt, kmp_int32, 32, &&, 0 )            // __kmpc_atomic_fixed4_andl_cpt
ATOMIC_CMPX_L_CPT( fixed4, orl_cpt,  kmp_int32, 32, ||, 0 )            // __kmpc_atomic_fixed4_orl_cpt
ATOMIC_CMPX_L_CPT( fixed8, andl_cpt, kmp_int64, 64, &&, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andl_cpt
ATOMIC_CMPX_L_CPT( fixed8, orl_cpt,  kmp_int64, 64, ||, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orl_cpt

// -------------------------------------------------------------------------
// Routines for Fortran operators that matched no one in C:
// MAX, MIN, .EQV., .NEQV.
// Operators .AND., .OR. are covered by __kmpc_atomic_*_{andl,orl}_cpt
// Intrinsics IAND, IOR, IEOR are covered by __kmpc_atomic_*_{andb,orb,xor}_cpt
// -------------------------------------------------------------------------

// -------------------------------------------------------------------------
// MIN and MAX need separate macros
// OP - operator to check if we need any actions?
#define MIN_MAX_CRITSECT_CPT(OP,LCK_ID) \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    \
    if ( *lhs OP rhs ) { /* still need actions? */ \
        old_value = *lhs; \
        *lhs = rhs; \
        if ( flag ) \
            new_value = rhs; \
        else \
            new_value = old_value; \
    } \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    return new_value; \

// -------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define GOMP_MIN_MAX_CRITSECT_CPT(OP,FLAG) \
    if (( FLAG ) && ( __kmp_atomic_mode == 2 )) { \
        KMP_CHECK_GTID; \
        MIN_MAX_CRITSECT_CPT( OP, 0 ); \
    }
#else
#define GOMP_MIN_MAX_CRITSECT_CPT(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */

// -------------------------------------------------------------------------
// CAS loop: keep retrying while the new value still "wins" the comparison
// and the store races with another thread.
#define MIN_MAX_CMPXCHG_CPT(TYPE,BITS,OP) \
    { \
        TYPE KMP_ATOMIC_VOLATILE temp_val; \
        /*TYPE old_value; */ \
        temp_val = *lhs; \
        old_value = temp_val; \
        while ( old_value OP rhs &&          /* still need actions? */ \
            ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value, \
                      *VOLATILE_CAST(kmp_int##BITS *) &rhs ) ) \
        { \
            KMP_CPU_PAUSE(); \
            temp_val = *lhs; \
            old_value = temp_val; \
        } \
        if( flag ) \
            return rhs; \
        else \
            return old_value; \
    }

// -------------------------------------------------------------------------
// 1-byte, 2-byte operands - use critical section
#define MIN_MAX_CRITICAL_CPT(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
    TYPE new_value, old_value; \
    if ( *lhs OP rhs ) {     /* need actions? */ \
        GOMP_MIN_MAX_CRITSECT_CPT(OP,GOMP_FLAG) \
        MIN_MAX_CRITSECT_CPT(OP,LCK_ID) \
    } \
    return *lhs; \
}

#define MIN_MAX_COMPXCHG_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
    TYPE new_value, old_value; \
    if ( *lhs OP rhs ) { \
        GOMP_MIN_MAX_CRITSECT_CPT(OP,GOMP_FLAG) \
        MIN_MAX_CMPXCHG_CPT(TYPE,BITS,OP) \
    } \
    return *lhs; \
}

MIN_MAX_COMPXCHG_CPT( fixed1, max_cpt, char,       8,  <, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed1, min_cpt, char,       8,  >, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_min_cpt
MIN_MAX_COMPXCHG_CPT( fixed2, max_cpt, short,      16, <, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed2, min_cpt, short,      16, >, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_min_cpt
MIN_MAX_COMPXCHG_CPT( fixed4, max_cpt, kmp_int32,  32, <, 0 )            // __kmpc_atomic_fixed4_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed4, min_cpt, kmp_int32,  32, >, 0 )            // __kmpc_atomic_fixed4_min_cpt
MIN_MAX_COMPXCHG_CPT( fixed8, max_cpt, kmp_int64,  64, <, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed8, min_cpt, kmp_int64,  64, >, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_min_cpt
MIN_MAX_COMPXCHG_CPT( float4, max_cpt, kmp_real32, 32, <, KMP_ARCH_X86 ) // __kmpc_atomic_float4_max_cpt
MIN_MAX_COMPXCHG_CPT( float4, min_cpt, kmp_real32, 32, >, KMP_ARCH_X86 ) // __kmpc_atomic_float4_min_cpt
MIN_MAX_COMPXCHG_CPT( float8, max_cpt, kmp_real64, 64, <, KMP_ARCH_X86 ) // __kmpc_atomic_float8_max_cpt
MIN_MAX_COMPXCHG_CPT( float8, min_cpt, kmp_real64, 64, >, KMP_ARCH_X86 ) // __kmpc_atomic_float8_min_cpt
#if KMP_HAVE_QUAD
MIN_MAX_CRITICAL_CPT( float16, max_cpt, QUAD_LEGACY, <, 16r, 1 )         // __kmpc_atomic_float16_max_cpt
MIN_MAX_CRITICAL_CPT( float16, min_cpt, QUAD_LEGACY, >, 16r, 1 )         // __kmpc_atomic_float16_min_cpt
#if ( KMP_ARCH_X86 )
    MIN_MAX_CRITICAL_CPT( float16, max_a16_cpt, Quad_a16_t, <, 16r, 1 )  // __kmpc_atomic_float16_max_a16_cpt
    MIN_MAX_CRITICAL_CPT( float16, min_a16_cpt, Quad_a16_t, >, 16r, 1 )  // __kmpc_atomic_float16_min_a16_cpt
// __kmpc_atomic_float16_min_a16_cpt  (comment typo fixed: was "mix_a16_cpt")
#endif
#endif

// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_EQV_CPT(OP,FLAG) \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
        KMP_CHECK_GTID; \
        OP_CRITICAL_CPT( OP, 0 ); \
    }
#else
#define OP_GOMP_CRITICAL_EQV_CPT(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */

// ------------------------------------------------------------------------
// .EQV. is implemented as x = x ^ ~rhs (XNOR), hence the "^=~" assignment
// operator passed to the GOMP critical fallback below.
#define ATOMIC_CMPX_EQV_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
    TYPE new_value; \
    OP_GOMP_CRITICAL_EQV_CPT(^=~,GOMP_FLAG)  /* send assignment */ \
    OP_CMPXCHG_CPT(TYPE,BITS,OP) \
}

// ------------------------------------------------------------------------

ATOMIC_CMPXCHG_CPT(  fixed1, neqv_cpt, kmp_int8,  8,  ^,  KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_neqv_cpt
ATOMIC_CMPXCHG_CPT(  fixed2, neqv_cpt, kmp_int16, 16, ^,  KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_neqv_cpt
ATOMIC_CMPXCHG_CPT(  fixed4, neqv_cpt, kmp_int32, 32, ^,  KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_neqv_cpt
ATOMIC_CMPXCHG_CPT(  fixed8, neqv_cpt, kmp_int64, 64, ^,  KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_neqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed1, eqv_cpt,  kmp_int8,  8,  ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_eqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed2, eqv_cpt,  kmp_int16, 16, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_eqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed4, eqv_cpt,  kmp_int32, 32, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_eqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed8, eqv_cpt,  kmp_int64, 64, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_eqv_cpt

// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
//     TYPE_ID, OP_ID, TYPE - detailed above
//     OP - operator
//     LCK_ID - lock identifier, used to possibly distinguish lock variable
#define ATOMIC_CRITICAL_CPT(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
    TYPE new_value; \
    OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) /* send assignment */ \
    OP_CRITICAL_CPT(OP##=,LCK_ID) /* send assignment */ \
}

// ------------------------------------------------------------------------
// Workaround for cmplx4. Regular routines with return value don't work
// on Win_32e. Let's return captured values through the additional parameter.
#define OP_CRITICAL_CPT_WRK(OP,LCK_ID) \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    \
    if( flag ) { \
        (*lhs) OP rhs; \
        (*out) = (*lhs); \
    } else { \
        (*out) = (*lhs); \
        (*lhs) OP rhs; \
    } \
    \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    return;
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_CPT_WRK(OP,FLAG) \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
        KMP_CHECK_GTID; \
        OP_CRITICAL_CPT_WRK( OP##=, 0 ); \
    }
#else
#define OP_GOMP_CRITICAL_CPT_WRK(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
// Prototype variant that returns the captured value through *out instead
// of the function return value.
#define ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE) \
void __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, TYPE * out, int flag ) \
{ \
    KMP_DEBUG_ASSERT( __kmp_init_serial ); \
    KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));
// ------------------------------------------------------------------------
#define ATOMIC_CRITICAL_CPT_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE) \
    OP_GOMP_CRITICAL_CPT_WRK(OP,GOMP_FLAG) \
    OP_CRITICAL_CPT_WRK(OP##=,LCK_ID) \
}
// The end of workaround for cmplx4

/* ------------------------------------------------------------------------- */
// routines for long double type
ATOMIC_CRITICAL_CPT( float10, add_cpt, long double, +, 10r, 1 ) // __kmpc_atomic_float10_add_cpt
ATOMIC_CRITICAL_CPT( float10, sub_cpt, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub_cpt
ATOMIC_CRITICAL_CPT( float10, mul_cpt, long double, *, 10r, 1 ) // __kmpc_atomic_float10_mul_cpt
// Extended-type capture entry points (critical-section based).
ATOMIC_CRITICAL_CPT( float10, div_cpt, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div_cpt
#if KMP_HAVE_QUAD
// routines for _Quad type
ATOMIC_CRITICAL_CPT( float16, add_cpt, QUAD_LEGACY, +, 16r, 1 ) // __kmpc_atomic_float16_add_cpt
ATOMIC_CRITICAL_CPT( float16, sub_cpt, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub_cpt
ATOMIC_CRITICAL_CPT( float16, mul_cpt, QUAD_LEGACY, *, 16r, 1 ) // __kmpc_atomic_float16_mul_cpt
ATOMIC_CRITICAL_CPT( float16, div_cpt, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div_cpt
#if ( KMP_ARCH_X86 )
    // 16-byte-aligned _Quad variants.
    ATOMIC_CRITICAL_CPT( float16, add_a16_cpt, Quad_a16_t, +, 16r, 1 ) // __kmpc_atomic_float16_add_a16_cpt
    ATOMIC_CRITICAL_CPT( float16, sub_a16_cpt, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16_cpt
    ATOMIC_CRITICAL_CPT( float16, mul_a16_cpt, Quad_a16_t, *, 16r, 1 ) // __kmpc_atomic_float16_mul_a16_cpt
    ATOMIC_CRITICAL_CPT( float16, div_a16_cpt, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16_cpt
#endif
#endif

// routines for complex types

// cmplx4 routines to return void
ATOMIC_CRITICAL_CPT_WRK( cmplx4, add_cpt, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_add_cpt
ATOMIC_CRITICAL_CPT_WRK( cmplx4, sub_cpt, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub_cpt
ATOMIC_CRITICAL_CPT_WRK( cmplx4, mul_cpt, kmp_cmplx32, *, 8c, 1 ) // __kmpc_atomic_cmplx4_mul_cpt
ATOMIC_CRITICAL_CPT_WRK( cmplx4, div_cpt, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div_cpt

ATOMIC_CRITICAL_CPT( cmplx8,  add_cpt, kmp_cmplx64, +, 16c, 1 ) // __kmpc_atomic_cmplx8_add_cpt
ATOMIC_CRITICAL_CPT( cmplx8,  sub_cpt, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub_cpt
ATOMIC_CRITICAL_CPT( cmplx8,  mul_cpt, kmp_cmplx64, *, 16c, 1 ) // __kmpc_atomic_cmplx8_mul_cpt
ATOMIC_CRITICAL_CPT( cmplx8,  div_cpt, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div_cpt
ATOMIC_CRITICAL_CPT( cmplx10, add_cpt, kmp_cmplx80, +, 20c, 1 ) // __kmpc_atomic_cmplx10_add_cpt
ATOMIC_CRITICAL_CPT( cmplx10, sub_cpt, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub_cpt
ATOMIC_CRITICAL_CPT( cmplx10, mul_cpt, kmp_cmplx80, *, 20c, 1 ) // __kmpc_atomic_cmplx10_mul_cpt
ATOMIC_CRITICAL_CPT( cmplx10, div_cpt, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div_cpt
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_CPT( cmplx16, add_cpt, CPLX128_LEG, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add_cpt
ATOMIC_CRITICAL_CPT( cmplx16, sub_cpt, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_cpt
ATOMIC_CRITICAL_CPT( cmplx16, mul_cpt, CPLX128_LEG, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul_cpt
ATOMIC_CRITICAL_CPT( cmplx16, div_cpt, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_cpt
#if ( KMP_ARCH_X86 )
    ATOMIC_CRITICAL_CPT( cmplx16, add_a16_cpt, kmp_cmplx128_a16_t, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add_a16_cpt
    ATOMIC_CRITICAL_CPT( cmplx16, sub_a16_cpt, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_cpt
    ATOMIC_CRITICAL_CPT( cmplx16, mul_a16_cpt, kmp_cmplx128_a16_t, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul_a16_cpt
    ATOMIC_CRITICAL_CPT( cmplx16, div_a16_cpt, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_cpt
#endif
#endif

#if OMP_40_ENABLED

// OpenMP 4.0: v = x = expr binop x; { v = x; x = expr binop x; } { x = expr binop x; v = x; } for non-commutative operations.
// Supported only on IA-32 architecture and Intel(R) 64

// -------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
//     OP     - operator (it's supposed to contain an assignment)
//     LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
#define OP_CRITICAL_CPT_REV(OP,LCK_ID) \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    \
    if( flag ) { \
        /*temp_val = (*lhs);*/\
        (*lhs) = (rhs) OP (*lhs); \
        new_value = (*lhs); \
    } else { \
        new_value = (*lhs);\
        (*lhs) = (rhs) OP (*lhs); \
    } \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    return new_value;

// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_CPT_REV(OP,FLAG) \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
        KMP_CHECK_GTID; \
        OP_CRITICAL_CPT_REV( OP, 0 ); \
    }
#else
#define OP_GOMP_CRITICAL_CPT_REV(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */

// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
//     TYPE    - operands' type
//     BITS    - size in bits, used to distinguish low level calls
//     OP      - operator
// Note: temp_val introduced in order to force the compiler to read
//       *lhs only once (w/o it the compiler reads *lhs twice)
// Reverse form: computes rhs OP old_value (operand order swapped).
#define OP_CMPXCHG_CPT_REV(TYPE,BITS,OP) \
    { \
        TYPE KMP_ATOMIC_VOLATILE temp_val; \
        TYPE old_value, new_value; \
        temp_val = *lhs; \
        old_value = temp_val; \
        new_value = rhs OP old_value; \
        while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value, \
                      *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
        { \
            KMP_CPU_PAUSE(); \
            \
            temp_val = *lhs; \
            old_value = temp_val; \
            new_value = rhs OP old_value; \
        } \
        if( flag ) { \
            return new_value; \
        } else \
            return old_value; \
    }

// -------------------------------------------------------------------------
// NOTE(review): the outer new_value/temp_val here are only used by the GOMP
// critical fallback; OP_CMPXCHG_CPT_REV declares its own shadowing copies.
#define ATOMIC_CMPXCHG_CPT_REV(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
    TYPE new_value; \
    TYPE KMP_ATOMIC_VOLATILE temp_val; \
    OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG) \
    OP_CMPXCHG_CPT_REV(TYPE,BITS,OP) \
}

ATOMIC_CMPXCHG_CPT_REV( fixed1,  div_cpt_rev, kmp_int8,   8,  /,  KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1u, div_cpt_rev, kmp_uint8,  8,  /,  KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1,  shl_cpt_rev, kmp_int8,   8,  <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1,  shr_cpt_rev, kmp_int8,   8,  >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1u, shr_cpt_rev, kmp_uint8,  8,  >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1,  sub_cpt_rev, kmp_int8,   8,  -,  KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2,  div_cpt_rev, kmp_int16,  16, /,  KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2u, div_cpt_rev, kmp_uint16, 16, /,  KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2,  shl_cpt_rev, kmp_int16,  16, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2,  shr_cpt_rev, kmp_int16,  16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2u, shr_cpt_rev, kmp_uint16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2,  sub_cpt_rev, kmp_int16,  16, -,  KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4,  div_cpt_rev, kmp_int32,  32, /,  KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4u, div_cpt_rev, kmp_uint32, 32, /,  KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4,  shl_cpt_rev, kmp_int32,  32, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4,  shr_cpt_rev, kmp_int32,  32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4u, shr_cpt_rev, kmp_uint32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4,  sub_cpt_rev, kmp_int32,  32, -,  KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8,  div_cpt_rev, kmp_int64,  64, /,  KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8u, div_cpt_rev, kmp_uint64, 64, /,  KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8,  shl_cpt_rev, kmp_int64,  64, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8,  shr_cpt_rev, kmp_int64,  64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8u, shr_cpt_rev, kmp_uint64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8,  sub_cpt_rev, kmp_int64,  64, -,  KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( float4,  div_cpt_rev, kmp_real32, 32, /,  KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( float4,  sub_cpt_rev, kmp_real32, 32, -,  KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( float8,  div_cpt_rev, kmp_real64, 64, /,  KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( float8,  sub_cpt_rev, kmp_real64, 64, -,  KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_cpt_rev
//                      TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG

// ------------------------------------------------------------------------
Routines for Extended types: long double, _Quad, complex flavours (use critical section) // TYPE_ID, OP_ID, TYPE - detailed above // OP - operator // LCK_ID - lock identifier, used to possibly distinguish lock variable #define ATOMIC_CRITICAL_CPT_REV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value; \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ /*printf("__kmp_atomic_mode = %d\n", __kmp_atomic_mode);*/\ OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG) \ OP_CRITICAL_CPT_REV(OP,LCK_ID) \ } /* ------------------------------------------------------------------------- */ // routines for long double type ATOMIC_CRITICAL_CPT_REV( float10, sub_cpt_rev, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub_cpt_rev ATOMIC_CRITICAL_CPT_REV( float10, div_cpt_rev, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div_cpt_rev #if KMP_HAVE_QUAD // routines for _Quad type ATOMIC_CRITICAL_CPT_REV( float16, sub_cpt_rev, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub_cpt_rev ATOMIC_CRITICAL_CPT_REV( float16, div_cpt_rev, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div_cpt_rev #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_CPT_REV( float16, sub_a16_cpt_rev, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16_cpt_rev ATOMIC_CRITICAL_CPT_REV( float16, div_a16_cpt_rev, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16_cpt_rev #endif #endif // routines for complex types // ------------------------------------------------------------------------ // Workaround for cmplx4. Regular routines with return value don't work // on Win_32e. Let's return captured values through the additional parameter. 
// Reverse-operand capture for cmplx4, captured value returned via *out.
#define OP_CRITICAL_CPT_REV_WRK(OP,LCK_ID) \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    \
    if( flag ) { \
        (*lhs) = (rhs) OP (*lhs); \
        (*out) = (*lhs); \
    } else { \
        (*out) = (*lhs); \
        (*lhs) = (rhs) OP (*lhs); \
    } \
    \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    return;
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_CPT_REV_WRK(OP,FLAG) \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
        KMP_CHECK_GTID; \
        OP_CRITICAL_CPT_REV_WRK( OP, 0 ); \
    }
#else
#define OP_GOMP_CRITICAL_CPT_REV_WRK(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
#define ATOMIC_CRITICAL_CPT_REV_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE) \
    OP_GOMP_CRITICAL_CPT_REV_WRK(OP,GOMP_FLAG) \
    OP_CRITICAL_CPT_REV_WRK(OP,LCK_ID) \
}
// The end of workaround for cmplx4

// !!! TODO: check if we need to return void for cmplx4 routines
// cmplx4 routines to return void
ATOMIC_CRITICAL_CPT_REV_WRK( cmplx4, sub_cpt_rev, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV_WRK( cmplx4, div_cpt_rev, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div_cpt_rev

ATOMIC_CRITICAL_CPT_REV( cmplx8,  sub_cpt_rev, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx8,  div_cpt_rev, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx10, sub_cpt_rev, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx10, div_cpt_rev, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div_cpt_rev
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_CPT_REV( cmplx16, sub_cpt_rev, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx16, div_cpt_rev, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_cpt_rev
#if ( KMP_ARCH_X86 )
    ATOMIC_CRITICAL_CPT_REV( cmplx16, sub_a16_cpt_rev, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_cpt_rev
    ATOMIC_CRITICAL_CPT_REV( cmplx16, div_a16_cpt_rev, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_cpt_rev
#endif
#endif

// Capture reverse for mixed type: RHS=float16
#if KMP_HAVE_QUAD

// Beginning of a definition (provides name, parameters, debug trace)
//     TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed)
//     OP_ID   - operation identifier (add, sub, mul, ...)
//     TYPE    - operands' type
// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG_CPT_REV_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN_CPT_MIX(TYPE_ID,OP_ID,TYPE,RTYPE_ID,RTYPE) \
    TYPE new_value; \
    OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG) \
    OP_CMPXCHG_CPT_REV(TYPE,BITS,OP) \
}

// -------------------------------------------------------------------------
#define ATOMIC_CRITICAL_CPT_REV_MIX(TYPE_ID,TYPE,OP_ID,OP,RTYPE_ID,RTYPE,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_CPT_MIX(TYPE_ID,OP_ID,TYPE,RTYPE_ID,RTYPE) \
    TYPE new_value; \
    OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG) /* send assignment */ \
    OP_CRITICAL_CPT_REV(OP,LCK_ID) /* send assignment */ \
}

ATOMIC_CMPXCHG_CPT_REV_MIX( fixed1,  char,  sub_cpt_rev, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( fixed1u, uchar, sub_cpt_rev, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_sub_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( fixed1,  char,  div_cpt_rev, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( fixed1u, uchar, div_cpt_rev, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( fixed2,  short,  sub_cpt_rev, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( fixed2u, ushort, sub_cpt_rev, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_sub_cpt_rev_fp
// __kmpc_atomic_fixed2u_sub_cpt_rev_fp
// Remaining mixed-type capture-reverse generators (_Quad RHS).
ATOMIC_CMPXCHG_CPT_REV_MIX( fixed2,  short,  div_cpt_rev, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( fixed2u, ushort, div_cpt_rev, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( fixed4,  kmp_int32,  sub_cpt_rev, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( fixed4u, kmp_uint32, sub_cpt_rev, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_sub_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( fixed4,  kmp_int32,  div_cpt_rev, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( fixed4u, kmp_uint32, div_cpt_rev, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_div_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( fixed8,  kmp_int64,  sub_cpt_rev, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( fixed8u, kmp_uint64, sub_cpt_rev, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_sub_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( fixed8,  kmp_int64,  div_cpt_rev, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( fixed8u, kmp_uint64, div_cpt_rev, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( float4, kmp_real32, sub_cpt_rev, 32, -, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( float4, kmp_real32, div_cpt_rev, 32, /, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( float8, kmp_real64, sub_cpt_rev, 64, -, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( float8, kmp_real64, div_cpt_rev, 64, /, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_cpt_rev_fp
ATOMIC_CRITICAL_CPT_REV_MIX( float10, long double, sub_cpt_rev, -, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_sub_cpt_rev_fp
ATOMIC_CRITICAL_CPT_REV_MIX( float10, long double, div_cpt_rev, /, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_div_cpt_rev_fp

#endif //KMP_HAVE_QUAD

//   OpenMP 4.0 Capture-write (swap): {v = x; x = expr;}
#define ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \
TYPE __kmpc_atomic_##TYPE_ID##_swp( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \
{ \
    KMP_DEBUG_ASSERT( __kmp_init_serial ); \
    KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_swp: T#%d\n", gtid ));

#define CRITICAL_SWP(LCK_ID) \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    \
    old_value = (*lhs); \
    (*lhs) = rhs; \
    \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    return old_value;

// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define GOMP_CRITICAL_SWP(FLAG) \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
        KMP_CHECK_GTID; \
        CRITICAL_SWP( 0 ); \
    }
#else
#define GOMP_CRITICAL_SWP(FLAG)
#endif /* KMP_GOMP_COMPAT */

// Swap via hardware exchange instruction (integer payloads).
#define ATOMIC_XCHG_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG) \
ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \
    TYPE old_value; \
    GOMP_CRITICAL_SWP(GOMP_FLAG) \
    old_value = KMP_XCHG_FIXED##BITS( lhs, rhs ); \
    return old_value; \
}
// ------------------------------------------------------------------------
// Swap via hardware exchange instruction (floating-point payloads).
#define ATOMIC_XCHG_FLOAT_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG) \
ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \
    TYPE old_value; \
    GOMP_CRITICAL_SWP(GOMP_FLAG) \
    old_value = KMP_XCHG_REAL##BITS( lhs, rhs ); \
    return old_value; \
}

// ------------------------------------------------------------------------
// Swap via compare-and-swap loop (used where plain 64-bit exchange is not
// available, see the KMP_ARCH_X86 branch below).
#define CMPXCHG_SWP(TYPE,BITS) \
    { \
        TYPE KMP_ATOMIC_VOLATILE temp_val; \
        TYPE old_value, new_value; \
        temp_val = *lhs; \
        old_value = temp_val; \
        new_value = rhs; \
        while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value, \
                      *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
        { \
            KMP_CPU_PAUSE(); \
            \
            temp_val = *lhs; \
            old_value = temp_val; \
            new_value = rhs; \
        } \
        return old_value; \
    }

// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG) \
ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \
    TYPE old_value; \
    GOMP_CRITICAL_SWP(GOMP_FLAG) \
    CMPXCHG_SWP(TYPE,BITS) \
}

ATOMIC_XCHG_SWP( fixed1, kmp_int8,  8,  KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_swp
ATOMIC_XCHG_SWP( fixed2, kmp_int16, 16, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_swp
ATOMIC_XCHG_SWP( fixed4, kmp_int32, 32, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_swp

ATOMIC_XCHG_FLOAT_SWP( float4, kmp_real32, 32, KMP_ARCH_X86 ) // __kmpc_atomic_float4_swp

#if ( KMP_ARCH_X86 )
    // 32-bit x86: no 64-bit xchg, fall back to cmpxchg8b loop.
    ATOMIC_CMPXCHG_SWP( fixed8, kmp_int64, 64, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_swp
    ATOMIC_CMPXCHG_SWP( float8, kmp_real64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_float8_swp
#else
    ATOMIC_XCHG_SWP(       fixed8, kmp_int64, 64, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_swp
    ATOMIC_XCHG_FLOAT_SWP( float8, kmp_real64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_float8_swp
#endif

// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
#define ATOMIC_CRITICAL_SWP(TYPE_ID,TYPE,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \
    TYPE old_value; \
    GOMP_CRITICAL_SWP(GOMP_FLAG) \
    CRITICAL_SWP(LCK_ID) \
}

// ------------------------------------------------------------------------
// !!! TODO: check if we need to return void for cmplx4 routines
// Workaround for cmplx4. Regular routines with return value don't work
// on Win_32e. Let's return captured values through the additional parameter.
#define ATOMIC_BEGIN_SWP_WRK(TYPE_ID,TYPE) \ void __kmpc_atomic_##TYPE_ID##_swp( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, TYPE * out ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_swp: T#%d\n", gtid )); #define CRITICAL_SWP_WRK(LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ tmp = (*lhs); \ (*lhs) = (rhs); \ (*out) = tmp; \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ return; // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define GOMP_CRITICAL_SWP_WRK(FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ CRITICAL_SWP_WRK( 0 ); \ } #else #define GOMP_CRITICAL_SWP_WRK(FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ #define ATOMIC_CRITICAL_SWP_WRK(TYPE_ID, TYPE,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_SWP_WRK(TYPE_ID,TYPE) \ TYPE tmp; \ GOMP_CRITICAL_SWP_WRK(GOMP_FLAG) \ CRITICAL_SWP_WRK(LCK_ID) \ } // The end of workaround for cmplx4 ATOMIC_CRITICAL_SWP( float10, long double, 10r, 1 ) // __kmpc_atomic_float10_swp #if KMP_HAVE_QUAD ATOMIC_CRITICAL_SWP( float16, QUAD_LEGACY, 16r, 1 ) // __kmpc_atomic_float16_swp #endif // cmplx4 routine to return void ATOMIC_CRITICAL_SWP_WRK( cmplx4, kmp_cmplx32, 8c, 1 ) // __kmpc_atomic_cmplx4_swp //ATOMIC_CRITICAL_SWP( cmplx4, kmp_cmplx32, 8c, 1 ) // __kmpc_atomic_cmplx4_swp ATOMIC_CRITICAL_SWP( cmplx8, kmp_cmplx64, 16c, 1 ) // __kmpc_atomic_cmplx8_swp ATOMIC_CRITICAL_SWP( cmplx10, kmp_cmplx80, 20c, 1 ) // __kmpc_atomic_cmplx10_swp #if KMP_HAVE_QUAD ATOMIC_CRITICAL_SWP( cmplx16, CPLX128_LEG, 32c, 1 ) // __kmpc_atomic_cmplx16_swp #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_SWP( float16_a16, Quad_a16_t, 16r, 1 ) // __kmpc_atomic_float16_a16_swp ATOMIC_CRITICAL_SWP( cmplx16_a16, kmp_cmplx128_a16_t, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_swp #endif #endif // End of OpenMP 4.0 Capture #endif //OMP_40_ENABLED #endif //KMP_ARCH_X86 || 
KMP_ARCH_X86_64 #undef OP_CRITICAL /* ------------------------------------------------------------------------ */ /* Generic atomic routines */ /* ------------------------------------------------------------------------ */ void __kmpc_atomic_1( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_init_serial ); if ( #if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT) FALSE /* must use lock */ #else TRUE #endif ) { kmp_int8 old_value, new_value; old_value = *(kmp_int8 *) lhs; (*f)( &new_value, &old_value, rhs ); /* TODO: Should this be acquire or release? */ while ( ! KMP_COMPARE_AND_STORE_ACQ8 ( (kmp_int8 *) lhs, *(kmp_int8 *) &old_value, *(kmp_int8 *) &new_value ) ) { KMP_CPU_PAUSE(); old_value = *(kmp_int8 *) lhs; (*f)( &new_value, &old_value, rhs ); } return; } else { // // All 1-byte data is of integer data type. // #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_1i, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_1i, gtid ); } } void __kmpc_atomic_2( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { if ( #if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT) FALSE /* must use lock */ #elif KMP_ARCH_X86 || KMP_ARCH_X86_64 TRUE /* no alignment problems */ #else ! ( (kmp_uintptr_t) lhs & 0x1) /* make sure address is 2-byte aligned */ #endif ) { kmp_int16 old_value, new_value; old_value = *(kmp_int16 *) lhs; (*f)( &new_value, &old_value, rhs ); /* TODO: Should this be acquire or release? */ while ( ! 
KMP_COMPARE_AND_STORE_ACQ16 ( (kmp_int16 *) lhs, *(kmp_int16 *) &old_value, *(kmp_int16 *) &new_value ) ) { KMP_CPU_PAUSE(); old_value = *(kmp_int16 *) lhs; (*f)( &new_value, &old_value, rhs ); } return; } else { // // All 2-byte data is of integer data type. // #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_2i, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_2i, gtid ); } } void __kmpc_atomic_4( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_init_serial ); if ( // // FIXME: On IA-32 architecture, gcc uses cmpxchg only for 4-byte ints. // Gomp compatibility is broken if this routine is called for floats. // #if KMP_ARCH_X86 || KMP_ARCH_X86_64 TRUE /* no alignment problems */ #else ! ( (kmp_uintptr_t) lhs & 0x3) /* make sure address is 4-byte aligned */ #endif ) { kmp_int32 old_value, new_value; old_value = *(kmp_int32 *) lhs; (*f)( &new_value, &old_value, rhs ); /* TODO: Should this be acquire or release? */ while ( ! KMP_COMPARE_AND_STORE_ACQ32 ( (kmp_int32 *) lhs, *(kmp_int32 *) &old_value, *(kmp_int32 *) &new_value ) ) { KMP_CPU_PAUSE(); old_value = *(kmp_int32 *) lhs; (*f)( &new_value, &old_value, rhs ); } return; } else { // // Use __kmp_atomic_lock_4i for all 4-byte data, // even if it isn't of integer data type. 
// #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_4i, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_4i, gtid ); } } void __kmpc_atomic_8( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_init_serial ); if ( #if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT) FALSE /* must use lock */ #elif KMP_ARCH_X86 || KMP_ARCH_X86_64 TRUE /* no alignment problems */ #else ! ( (kmp_uintptr_t) lhs & 0x7) /* make sure address is 8-byte aligned */ #endif ) { kmp_int64 old_value, new_value; old_value = *(kmp_int64 *) lhs; (*f)( &new_value, &old_value, rhs ); /* TODO: Should this be acquire or release? */ while ( ! KMP_COMPARE_AND_STORE_ACQ64 ( (kmp_int64 *) lhs, *(kmp_int64 *) &old_value, *(kmp_int64 *) &new_value ) ) { KMP_CPU_PAUSE(); old_value = *(kmp_int64 *) lhs; (*f)( &new_value, &old_value, rhs ); } return; } else { // // Use __kmp_atomic_lock_8i for all 8-byte data, // even if it isn't of integer data type. 
// #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_8i, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_8i, gtid ); } } void __kmpc_atomic_10( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_init_serial ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_10r, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_10r, gtid ); } void __kmpc_atomic_16( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_init_serial ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_16c, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_16c, gtid ); } void __kmpc_atomic_20( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_init_serial ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_20c, gtid ); (*f)( lhs, lhs, rhs 
); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_20c, gtid ); } void __kmpc_atomic_32( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_init_serial ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_32c, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_32c, gtid ); } // AC: same two routines as GOMP_atomic_start/end, but will be called by our compiler // duplicated in order to not use 3-party names in pure Intel code // TODO: consider adding GTID parameter after consultation with Ernesto/Xinmin. void __kmpc_atomic_start(void) { int gtid = __kmp_entry_gtid(); KA_TRACE(20, ("__kmpc_atomic_start: T#%d\n", gtid)); __kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid); } void __kmpc_atomic_end(void) { int gtid = __kmp_get_gtid(); KA_TRACE(20, ("__kmpc_atomic_end: T#%d\n", gtid)); __kmp_release_atomic_lock(&__kmp_atomic_lock, gtid); } /* ------------------------------------------------------------------------ */ /* ------------------------------------------------------------------------ */ /*! @} */ // end of file
matfit.c
#include "matrix.h"
#include <string.h>

#define MAX_ITERS_RB 20

/** \brief Performs 2-d polynomial model fitting using least squares
 *
 * \param[in] A Input data column matrix
 * \param[in] Y Input observation column matrix
 * \param[in] deg Polynomial degree \f$ N \f$
 * \param[in] result Matrix to store the result (may be NULL to allocate)
 * \return Polynomial co-efficient matrix \f$ \begin{bmatrix} \alpha_N & \cdots & \alpha_0\end{bmatrix}^T \f$
 *
 */
MATRIX mat_linear_ls_fit(MATRIX A, MATRIX Y, int deg, MATRIX result)
{
    int i, j, n;
    MATRIX B;
    n = MatRow(A);
    /* Build the Vandermonde design matrix: column deg is all ones,
     * column j holds A[i][0]^(deg-j), filled right-to-left. */
    B = mat_creat(n, deg+1, ONES_MATRIX);
    #pragma omp parallel for private(j)
    for(i=0; i<n; ++i)
    {
        for(j=deg-1; j>=0; --j)
            B[i][j] = A[i][0]*B[i][j+1];
    }
    result = mat_least_squares(B, Y, result);
    mat_free(B);
    return result;
}

/** \brief Solves linear equations using least squares
 *
 * \param[in] A Input data matrix
 * \param[in] Y Input observation matrix
 * \param[in] result Matrix to store the result (may be NULL to allocate)
 * \return \f$ \left(\mathbf{A}^{T}\mathbf{A}\right)^{-1}\mathbf{A}^{T}\mathbf{Y} \f$
 *
 */
MATRIX mat_least_squares(MATRIX A, MATRIX Y, MATRIX result)
{
    int m, n, o, i, j, k;
    MATRIX Apinv;
    if(MatRow(A)!=MatRow(Y))
        return mat_error(MAT_SIZEMISMATCH);
    Apinv = mat_pinv(A, NULL);
    if(Apinv==NULL)
        return mat_error(MAT_INVERSE_ILL_COND);
    m = MatCol(Apinv);
    n = MatRow(Apinv);
    o = MatCol(Y);
    if(result==NULL)
        if((result = mat_creat(n, o, UNDEFINED))==NULL)
        {
            mat_free(Apinv); /* avoid leaking the pseudo-inverse on OOM */
            return mat_error(MAT_MALLOC);
        }
    /* result = Apinv * Y */
    #pragma omp parallel for private(j, k) firstprivate(m)
    for(i=0; i<n; ++i)
    {
        for(j=0; j<o; ++j)
        {
            for(k=0, result[i][j]=0.0; k<m; ++k)
            {
                result[i][j] += Apinv[i][k]*Y[k][j];
            }
        }
    }
    mat_free(Apinv);
    return result;
}

/** \brief Solves linear equations using weighted least squares
 *
 * \param[in] A Input data matrix
 * \param[in] Y Input observation matrix
 * \param[in] w Input weight column matrix
 * \param[in] result Matrix to store the result (may be NULL to allocate)
 * \return \f$ \left(\mathbf{A}^{T}\textrm{diag}(w)\mathbf{A}\right)^{-1}\mathbf{A}^{T}\textrm{diag}(w)\mathbf{Y} \f$
 *
 */
MATRIX mat_w_least_squares(MATRIX A, MATRIX Y, MATRIX w, MATRIX result)
{
    int m, n, o, i, j, k;
    MATRIX Awpinv, W;
    /* Validate all sizes BEFORE allocating W so error returns cannot leak
     * (the original checked MatRow(A) after creating W, leaking it). */
    if(MatRow(w)!=MatRow(Y))
        return mat_error(MAT_SIZEMISMATCH);
    if(MatRow(A)!=MatRow(Y))
        return mat_error(MAT_SIZEMISMATCH);
    W = mat_creat_diag(w, NULL);
    if(W==NULL)
        return mat_error(MAT_MALLOC);
    Awpinv = mat_wpinv(A, W, NULL);
    if(Awpinv==NULL)
    {
        mat_free(W); /* was leaked on this error path */
        return mat_error(MAT_INVERSE_ILL_COND);
    }
    m = MatCol(Awpinv);
    n = MatRow(Awpinv);
    o = MatCol(Y);
    if(result==NULL)
        if((result = mat_creat(n, o, UNDEFINED))==NULL)
        {
            mat_free(Awpinv);
            mat_free(W);
            return mat_error(MAT_MALLOC);
        }
    /* result = Awpinv * Y */
    #pragma omp parallel for private(j, k) firstprivate(m)
    for(i=0; i<n; ++i)
    {
        for(j=0; j<o; ++j)
        {
            for(k=0, result[i][j]=0.0; k<m; ++k)
            {
                result[i][j] += Awpinv[i][k]*Y[k][j];
            }
        }
    }
    mat_free(Awpinv);
    mat_free(W);
    return result;
}

/** \brief Solves linear equations using robust reweighted least squares
 *
 * Iteratively re-solves a weighted least-squares problem, down-weighting
 * residual outliers with the chosen loss function until the absolute
 * residual sum falls below a threshold or MAX_ITERS_RB is reached.
 *
 * \param[in] A Input data matrix
 * \param[in] Y Input observation matrix
 * \param[in] lossfunc Loss function type (MAT_LOSS_BISQUARE/MAT_LOSS_HUBER)
 * \param[in] result Matrix to store the result (may be NULL to allocate)
 * \return Robust \f$ \mathbf{X}\f$
 *
 */
MATRIX mat_rob_least_squares(MATRIX A, MATRIX Y, int lossfunc, MATRIX result)
{
    int n, k;
    int flag = 0;
    mtype med = 0, madn_ = 0, norm_th = 0;
    MATRIX res = NULL, res_ = NULL, W = NULL, tmp1 = NULL, tmp2 = NULL;
    n = MatRow(A);
    W = mat_creat(n, 1, ONES_MATRIX);
    /* Convergence threshold: a small fraction of the total signal energy. */
    tmp1 = mat_abs(Y, NULL);
    norm_th = 0.0001f*mat_sum(tmp1);
    mat_free(tmp1);
    tmp1 = NULL;
    for(k=0; k<MAX_ITERS_RB && flag==0; ++k)
    {
        result = mat_w_least_squares(A, Y, W, result);
        tmp1 = mat_mul(A, result, tmp1);
        res_ = mat_sub(tmp1, Y, res_);
        if(k==0)
        {
            /* Robust scale estimate: normalized median absolute deviation
             * (MAD * 1.4826), computed once from the first residuals. */
            med = mat_median(res_);
            tmp1 = mat_subs(res_, med, tmp1);
            tmp2 = mat_abs(tmp1, tmp2);
            madn_ = mat_median(tmp2)*1.4826+(mtype)eps;/* *6.9414 */
            mat_free(tmp2);
            tmp2 = NULL; /* hygiene: pointer no longer valid */
        }
        res = mat_abs(res_, res);
        if(mat_sum(res)<norm_th)
            flag = 1;
        if(k!=(MAX_ITERS_RB-1))
        {
            /* Re-weight residuals with the requested loss; the tuning
             * constants (1.345, 4.685) are the standard 95%-efficiency
             * values for Huber and Tukey bisquare respectively. */
            switch(lossfunc)
            {
            case MAT_LOSS_HUBER:
                W = mat_huber_wt(res, 1.0, madn_*1.345, W);
                break;
            case MAT_LOSS_BISQUARE:
                W = mat_bisquare_wt(res, 1.0, madn_*4.685, W);
                break;
            default:
                W = mat_bisquare_wt(res, 1.0, madn_*4.685, W);
            }
        }
    }
    mat_free(W);
    mat_free(res);
    mat_free(res_);
    mat_free(tmp1);
    return result;
}

/** \brief Performs 2-d polynomial model fitting using robust least squares
 *
 * \param[in] A Input data column matrix
 * \param[in] Y Input observation column matrix
 * \param[in] deg Polynomial degree \f$ N \f$
 * \param[in] lossfunc Loss function type (MAT_LOSS_BISQUARE/MAT_LOSS_HUBER)
 * \param[in] result Matrix to store the result (may be NULL to allocate)
 * \return Polynomial co-efficient matrix \f$ \begin{bmatrix} \alpha_N & \cdots & \alpha_0\end{bmatrix}^T \f$
 *
 */
MATRIX mat_robust_fit(MATRIX A, MATRIX Y, int deg, int lossfunc, MATRIX result)
{
    int i, j, n;
    MATRIX B = NULL;
    n = MatRow(A);
    /* Same Vandermonde construction as mat_linear_ls_fit. */
    B = mat_creat(n, deg+1, ONES_MATRIX);
    #pragma omp parallel for private(j)
    for(i=0; i<n; ++i)
    {
        for(j=deg-1; j>=0; --j)
            B[i][j] = A[i][0]*B[i][j+1];
    }
    result = mat_rob_least_squares(B, Y, lossfunc, result);
    mat_free(B);
    return result;
}
GB_binop__ne_fc32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__ne_fc32 // A.*B function (eWiseMult): GB_AemultB__ne_fc32 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__ne_fc32 // C+=b function (dense accum): GB_Cdense_accumb__ne_fc32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ne_fc32 // C=scalar+B GB_bind1st__ne_fc32 // C=scalar+B' GB_bind1st_tran__ne_fc32 // C=A+scalar GB_bind2nd__ne_fc32 // C=A'+scalar GB_bind2nd_tran__ne_fc32 // C type: bool // A type: GxB_FC32_t // B,b type: GxB_FC32_t // BinaryOp: cij = GB_FC32_ne (aij, bij) #define GB_ATYPE \ GxB_FC32_t #define GB_BTYPE \ GxB_FC32_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ GxB_FC32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ 
cij = (crealf (Ax [pA]) != 0) || (cimagf (Ax [pA]) != 0) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = (crealf (Bx [pB]) != 0) || (cimagf (Bx [pB]) != 0) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_FC32_ne (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_NE || GxB_NO_FC32 || GxB_NO_NE_FC32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__ne_fc32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__ne_fc32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__ne_fc32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type GxB_FC32_t GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, 
const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__ne_fc32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = 
NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__ne_fc32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__ne_fc32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ; GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC32_t bij = Bx [p] ; Cx [p] = GB_FC32_ne (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix 
with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__ne_fc32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ; GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC32_t aij = Ax [p] ; Cx [p] = GB_FC32_ne (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = Ax [pA] ; \ Cx [pC] = GB_FC32_ne (x, aij) ; \ } GrB_Info GB_bind1st_tran__ne_fc32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = Ax [pA] ; \ Cx [pC] = GB_FC32_ne (aij, y) ; \ } GrB_Info GB_bind2nd_tran__ne_fc32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
BsplineFunctor.h
//////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: John R. Gergely,  University of Illinois at Urbana-Champaign
//                    Ken Esler, kpesler@gmail.com, University of Illinois at Urbana-Champaign
//                    Miguel Morales, moralessilva2@llnl.gov, Lawrence Livermore National Laboratory
//                    Raymond Clay III, j.k.rofling@gmail.com, Lawrence Livermore National Laboratory
//                    Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign
//                    Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
//                    Jaron T. Krogel, krogeljt@ornl.gov, Oak Ridge National Laboratory
//                    Mark A. Berrill, berrillma@ornl.gov, Oak Ridge National Laboratory
//                    Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp.
//
// File created by: Ken Esler, kpesler@gmail.com, University of Illinois at Urbana-Champaign
//////////////////////////////////////////////////////////////////////////////////////

#ifndef QMCPLUSPLUS_BSPLINE_FUNCTOR_H
#define QMCPLUSPLUS_BSPLINE_FUNCTOR_H
#include "Numerics/OptimizableFunctorBase.h"
#include "Utilities/ProgressReportEngine.h"
#include "OhmmsData/AttributeSet.h"
#include "Numerics/LinearFit.h"
#include "simd/allocator.hpp"
#include <cstdio>

namespace qmcplusplus
{

// Cubic B-spline radial functor u(r) on [0, cutoff_radius], used as a
// Jastrow correlation function.  The value and derivatives at r are cubic
// polynomials in t = r*DeltaRInv - floor(r*DeltaRInv), contracted against
// four consecutive spline coefficients via the fixed basis matrices
// A/dA/d2A/d3A (value, du/dr, d2u/dr2, d3u/dr3 respectively).
template<class T>
struct BsplineFunctor: public OptimizableFunctorBase
{
  typedef real_type value_type;
  int NumParams;                 // number of optimizable spline parameters
  int Dummy;
  // fixed 4x4 cubic-B-spline basis matrices, stored row-major as 16 entries
  const real_type A[16], dA[16], d2A[16], d3A[16];
  aligned_vector<real_type> SplineCoefs;  // NumParams+4 knot coefficients (see reset())
  //static const real_type A[16], dA[16], d2A[16];
  real_type DeltaR, DeltaRInv;   // knot spacing and its inverse
  real_type CuspValue;           // du/dr enforced at r=0 (see reset())
  real_type Y, dY, d2Y;          // cached value/first/second derivative from evaluateAll
  // Stores the derivatives w.r.t. SplineCoefs
  // of the u, du/dr, and d2u/dr2
  std::vector<TinyVector<real_type,3> > SplineDerivs;
  std::vector<real_type> Parameters;       // the optimizable parameters
  std::vector<std::string> ParameterNames;
  std::string elementType, pairType;
  std::string fileName;

  int ResetCount;
  bool notOpt;    // true => parameters excluded from optimization
  bool periodic;  // true => cutoff defaults to the Wigner-Seitz radius

  ///constructor
  BsplineFunctor(real_type cusp=0.0) :
    NumParams(0),
    A{-1.0/6.0,  3.0/6.0, -3.0/6.0, 1.0/6.0,
       3.0/6.0, -6.0/6.0,  0.0/6.0, 4.0/6.0,
      -3.0/6.0,  3.0/6.0,  3.0/6.0, 1.0/6.0,
       1.0/6.0,  0.0/6.0,  0.0/6.0, 0.0/6.0},
    dA{0.0, -0.5,  1.0, -0.5,
       0.0,  1.5, -2.0,  0.0,
       0.0, -1.5,  1.0,  0.5,
       0.0,  0.5,  0.0,  0.0},
    d2A{0.0, 0.0, -1.0,  1.0,
        0.0, 0.0,  3.0, -2.0,
        0.0, 0.0, -3.0,  1.0,
        0.0, 0.0,  1.0,  0.0},
    d3A{0.0, 0.0, 0.0, -1.0,
        0.0, 0.0, 0.0,  3.0,
        0.0, 0.0, 0.0, -3.0,
        0.0, 0.0, 0.0,  1.0},
    CuspValue(cusp), ResetCount(0), notOpt(false), periodic(true)
  {
    cutoff_radius = 0.0;
  }

  OptimizableFunctorBase* makeClone() const
  {
    return new BsplineFunctor(*this);
  }

  void setCusp(real_type c)
  {
    CuspValue = c;
  }

  void setPeriodic(bool p)
  {
    periodic = p;
  }

  // Resize storage for n parameters; knot spacing is derived from the
  // current cutoff_radius, so set that first.
  void resize(int n)
  {
    NumParams = n;
    int numCoefs = NumParams + 4;
    int numKnots = numCoefs - 2;
    DeltaR = cutoff_radius / (real_type)(numKnots - 1);
    DeltaRInv = 1.0/DeltaR;
    Parameters.resize(n);
    SplineCoefs.resize(numCoefs);
    SplineDerivs.resize(numCoefs);
  }

  // Rebuild SplineCoefs from Parameters, enforcing the cusp condition
  // du/dr(0) = CuspValue via the first coefficient.
  void reset()
  {
    int numCoefs = NumParams + 4;
    int numKnots = numCoefs - 2;
    DeltaR = cutoff_radius / (real_type)(numKnots - 1);
    DeltaRInv = 1.0/DeltaR;
    for (int i=0; i<SplineCoefs.size(); i++)
      SplineCoefs[i] = 0.0;
    // Ensure that cusp conditions is satisfied at the origin
    SplineCoefs[1] = Parameters[0];
    SplineCoefs[2] = Parameters[1];
    SplineCoefs[0] = Parameters[1] - 2.0*DeltaR * CuspValue;
    for (int i=2; i<Parameters.size(); i++)
      SplineCoefs[i+1] = Parameters[i];
  }

  /** compute value, gradient and laplacian for [iStart, iEnd) pairs
   * @param iat dummy
   * @param iStart starting particle index
   * @param iEnd ending particle index
   * @param _distArray distance arrUay
   * @param _valArray  u(r_j) for j=[iStart,iEnd)
   * @param _gradArray  du(r_j)/dr /r_j for j=[iStart,iEnd)
   * @param _lapArray  d2u(r_j)/dr2 for j=[iStart,iEnd)
   * @param distArrayCompressed temp storage to filter r_j < cutoff_radius
   * @param distIndices temp storage for the compressed index
   */
  void evaluateVGL(const int iat, const int iStart, const int iEnd,
                   const T* _distArray,
                   T* restrict _valArray,
                   T* restrict _gradArray,
                   T* restrict _laplArray,
                   T* restrict distArrayCompressed, int* restrict distIndices ) const;

  /** evaluate sum of the pair potentials for [iStart,iEnd)
   * @param iat dummy
   * @param iStart starting particle index
   * @param iEnd ending particle index
   * @param _distArray distance arrUay
   * @param distArrayCompressed temp storage to filter r_j < cutoff_radius
   * @return \f$\sum u(r_j)\f$ for r_j < cutoff_radius
   */
  T evaluateV(const int iat, const int iStart, const int iEnd,
              const T* restrict _distArray, T* restrict distArrayCompressed) const;

  // u(r); returns 0 beyond the cutoff.
  inline real_type evaluate(real_type r)
  {
    if (r >= cutoff_radius)
      return 0.0;
    r *= DeltaRInv;
    real_type ipart, t;
    t = std::modf(r, &ipart);
    int i = (int) ipart;
    real_type tp[4];
    tp[0] = t*t*t;
    tp[1] = t*t;
    tp[2] = t;
    tp[3] = 1.0;
    return
      (SplineCoefs[i+0]*(A[ 0]*tp[0] + A[ 1]*tp[1] + A[ 2]*tp[2] + A[ 3]*tp[3])+
       SplineCoefs[i+1]*(A[ 4]*tp[0] + A[ 5]*tp[1] + A[ 6]*tp[2] + A[ 7]*tp[3])+
       SplineCoefs[i+2]*(A[ 8]*tp[0] + A[ 9]*tp[1] + A[10]*tp[2] + A[11]*tp[3])+
       SplineCoefs[i+3]*(A[12]*tp[0] + A[13]*tp[1] + A[14]*tp[2] + A[15]*tp[3]));
  }

  // NOTE(review): rinv is unused; this overload also refreshes the cached
  // Y, dY, d2Y as a side effect by calling the 3-argument evaluate.
  inline real_type evaluate(real_type r, real_type rinv)
  {
    return Y=evaluate(r,dY,d2Y);
  }

  // Refresh the cached Y, dY, d2Y for the given r (rinv unused).
  inline void evaluateAll(real_type r, real_type rinv)
  {
    Y=evaluate(r,dY,d2Y);
  }

  // u(r) with first and second derivatives; all zero beyond the cutoff.
  inline real_type
  evaluate(real_type r, real_type& dudr, real_type& d2udr2)
  {
    if (r >= cutoff_radius)
    {
      dudr = d2udr2 = 0.0;
      return 0.0;
    }
    // real_type eps = 1.0e-5;
    //       real_type dudr_FD = (evaluate(r+eps)-evaluate(r-eps))/(2.0*eps);
    //       real_type d2udr2_FD = (evaluate(r+eps)+evaluate(r-eps)-2.0*evaluate(r))/(eps*eps);
    r *= DeltaRInv;
    real_type ipart, t;
    t = std::modf(r, &ipart);
    int i = (int) ipart;
    real_type tp[4];
    tp[0] = t*t*t;
    tp[1] = t*t;
    tp[2] = t;
    tp[3] = 1.0;
    d2udr2 = DeltaRInv * DeltaRInv *
             (SplineCoefs[i+0]*(d2A[ 0]*tp[0] + d2A[ 1]*tp[1] + d2A[ 2]*tp[2] + d2A[ 3]*tp[3])+
              SplineCoefs[i+1]*(d2A[ 4]*tp[0] + d2A[ 5]*tp[1] + d2A[ 6]*tp[2] + d2A[ 7]*tp[3])+
              SplineCoefs[i+2]*(d2A[ 8]*tp[0] + d2A[ 9]*tp[1] + d2A[10]*tp[2] + d2A[11]*tp[3])+
              SplineCoefs[i+3]*(d2A[12]*tp[0] + d2A[13]*tp[1] + d2A[14]*tp[2] + d2A[15]*tp[3]));
    dudr = DeltaRInv *
           (SplineCoefs[i+0]*(dA[ 0]*tp[0] + dA[ 1]*tp[1] + dA[ 2]*tp[2] + dA[ 3]*tp[3])+
            SplineCoefs[i+1]*(dA[ 4]*tp[0] + dA[ 5]*tp[1] + dA[ 6]*tp[2] + dA[ 7]*tp[3])+
            SplineCoefs[i+2]*(dA[ 8]*tp[0] + dA[ 9]*tp[1] + dA[10]*tp[2] + dA[11]*tp[3])+
            SplineCoefs[i+3]*(dA[12]*tp[0] + dA[13]*tp[1] + dA[14]*tp[2] + dA[15]*tp[3]));
    //       if (std::abs(dudr_FD-dudr) > 1.0e-8)
    //  std::cerr << "Error in BsplineFunction:  dudr = " << dudr
    //       << "  dudr_FD = " << dudr_FD << std::endl;
    //       if (std::abs(d2udr2_FD-d2udr2) > 1.0e-4)
    //  std::cerr << "Error in BsplineFunction:  r = " << r << "  d2udr2 = " << dudr
    //       << "  d2udr2_FD = " << d2udr2_FD << "  rcut = " << cutoff_radius << std::endl;
    return
      (SplineCoefs[i+0]*(A[ 0]*tp[0] + A[ 1]*tp[1] + A[ 2]*tp[2] + A[ 3]*tp[3])+
       SplineCoefs[i+1]*(A[ 4]*tp[0] + A[ 5]*tp[1] + A[ 6]*tp[2] + A[ 7]*tp[3])+
       SplineCoefs[i+2]*(A[ 8]*tp[0] + A[ 9]*tp[1] + A[10]*tp[2] + A[11]*tp[3])+
       SplineCoefs[i+3]*(A[12]*tp[0] + A[13]*tp[1] + A[14]*tp[2] + A[15]*tp[3]));
  }

  // u(r) with first, second, and third derivatives; all zero beyond the cutoff.
  inline real_type
  evaluate(real_type r, real_type& dudr, real_type& d2udr2, real_type &d3udr3)
  {
    if (r >= cutoff_radius)
    {
      dudr = d2udr2 = d3udr3 = 0.0;
      return 0.0;
    }
    // real_type eps = 1.0e-5;
    //       real_type dudr_FD = (evaluate(r+eps)-evaluate(r-eps))/(2.0*eps);
    //       real_type d2udr2_FD = (evaluate(r+eps)+evaluate(r-eps)-2.0*evaluate(r))/(eps*eps);
    //  real_type d3udr3_FD = (-1.0*evaluate(r+1.0*eps)
    //         +2.0*evaluate(r+0.5*eps)
    //         -2.0*evaluate(r-0.5*eps)
    //         +1.0*evaluate(r-1.0*eps))/(eps*eps*eps);
    r *= DeltaRInv;
    real_type ipart, t;
    t = std::modf(r, &ipart);
    int i = (int) ipart;
    real_type tp[4];
    tp[0] = t*t*t;
    tp[1] = t*t;
    tp[2] = t;
    tp[3] = 1.0;
    d3udr3 = DeltaRInv * DeltaRInv * DeltaRInv *
             (SplineCoefs[i+0]*(d3A[ 0]*tp[0] + d3A[ 1]*tp[1] + d3A[ 2]*tp[2] + d3A[ 3]*tp[3])+
              SplineCoefs[i+1]*(d3A[ 4]*tp[0] + d3A[ 5]*tp[1] + d3A[ 6]*tp[2] + d3A[ 7]*tp[3])+
              SplineCoefs[i+2]*(d3A[ 8]*tp[0] + d3A[ 9]*tp[1] + d3A[10]*tp[2] + d3A[11]*tp[3])+
              SplineCoefs[i+3]*(d3A[12]*tp[0] + d3A[13]*tp[1] + d3A[14]*tp[2] + d3A[15]*tp[3]));
    d2udr2 = DeltaRInv * DeltaRInv *
             (SplineCoefs[i+0]*(d2A[ 0]*tp[0] + d2A[ 1]*tp[1] + d2A[ 2]*tp[2] + d2A[ 3]*tp[3])+
              SplineCoefs[i+1]*(d2A[ 4]*tp[0] + d2A[ 5]*tp[1] + d2A[ 6]*tp[2] + d2A[ 7]*tp[3])+
              SplineCoefs[i+2]*(d2A[ 8]*tp[0] + d2A[ 9]*tp[1] + d2A[10]*tp[2] + d2A[11]*tp[3])+
              SplineCoefs[i+3]*(d2A[12]*tp[0] + d2A[13]*tp[1] + d2A[14]*tp[2] + d2A[15]*tp[3]));
    dudr = DeltaRInv *
           (SplineCoefs[i+0]*(dA[ 0]*tp[0] + dA[ 1]*tp[1] + dA[ 2]*tp[2] + dA[ 3]*tp[3])+
            SplineCoefs[i+1]*(dA[ 4]*tp[0] + dA[ 5]*tp[1] + dA[ 6]*tp[2] + dA[ 7]*tp[3])+
            SplineCoefs[i+2]*(dA[ 8]*tp[0] + dA[ 9]*tp[1] + dA[10]*tp[2] + dA[11]*tp[3])+
            SplineCoefs[i+3]*(dA[12]*tp[0] + dA[13]*tp[1] + dA[14]*tp[2] + dA[15]*tp[3]));
    //       if (std::abs(dudr_FD-dudr) > 1.0e-8)
    //  std::cerr << "Error in BsplineFunction:  dudr = " << dudr
    //       << "  dudr_FD = " << dudr_FD << std::endl;
    //       if (std::abs(d2udr2_FD-d2udr2) > 1.0e-4)
    //  std::cerr << "Error in BsplineFunction:  r = " << r << "  d2udr2 = " << dudr
    //       << "  d2udr2_FD = " << d2udr2_FD << "  rcut = " << cutoff_radius << std::endl;
    //  if (std::abs(d3udr3_FD-d3udr3) > 1.0e-4)
    //  std::cerr << "Error in BsplineFunction:  r = " << r << "  d3udr3 = " << dudr
    //       << "  d3udr3_FD = " << d3udr3_FD << "  rcut = " << cutoff_radius << std::endl;
    return
      (SplineCoefs[i+0]*(A[ 0]*tp[0] + A[ 1]*tp[1] + A[ 2]*tp[2] + A[ 3]*tp[3])+
       SplineCoefs[i+1]*(A[ 4]*tp[0] + A[ 5]*tp[1] + A[ 6]*tp[2] + A[ 7]*tp[3])+
       SplineCoefs[i+2]*(A[ 8]*tp[0] + A[ 9]*tp[1] + A[10]*tp[2] + A[11]*tp[3])+
       SplineCoefs[i+3]*(A[12]*tp[0] + A[13]*tp[1] + A[14]*tp[2] + A[15]*tp[3]));
  }

  // d(u,du/dr,d2u/dr2)/dParameters at r.  Returns false beyond the cutoff.
  // At most four coefficients influence any r; derivs maps coefficient
  // derivatives back to parameter indices (SplineDerivs[0] is zeroed first,
  // so the unconditional derivs[1] += SplineDerivs[0] only contributes the
  // cusp-condition term when i == 0).
  inline bool
  evaluateDerivatives(real_type r, std::vector<TinyVector<real_type,3> >& derivs)
  {
    if (r >= cutoff_radius)
      return false;
    r *= DeltaRInv;
    real_type ipart, t;
    t = std::modf(r, &ipart);
    int i = (int) ipart;
    real_type tp[4];
    tp[0] = t*t*t;
    tp[1] = t*t;
    tp[2] = t;
    tp[3] = 1.0;
    SplineDerivs[0] = TinyVector<real_type,3>(0.0);
    // d/dp_i u(r)
    SplineDerivs[i+0][0] = A[ 0]*tp[0] + A[ 1]*tp[1] + A[ 2]*tp[2] + A[ 3]*tp[3];
    SplineDerivs[i+1][0] = A[ 4]*tp[0] + A[ 5]*tp[1] + A[ 6]*tp[2] + A[ 7]*tp[3];
    SplineDerivs[i+2][0] = A[ 8]*tp[0] + A[ 9]*tp[1] + A[10]*tp[2] + A[11]*tp[3];
    SplineDerivs[i+3][0] = A[12]*tp[0] + A[13]*tp[1] + A[14]*tp[2] + A[15]*tp[3];
    // d/dp_i du/dr
    SplineDerivs[i+0][1] = DeltaRInv * (dA[ 1]*tp[1] + dA[ 2]*tp[2] + dA[ 3]*tp[3]);
    SplineDerivs[i+1][1] = DeltaRInv * (dA[ 5]*tp[1] + dA[ 6]*tp[2] + dA[ 7]*tp[3]);
    SplineDerivs[i+2][1] = DeltaRInv * (dA[ 9]*tp[1] + dA[10]*tp[2] + dA[11]*tp[3]);
    SplineDerivs[i+3][1] = DeltaRInv * (dA[13]*tp[1] + dA[14]*tp[2] + dA[15]*tp[3]);
    // d/dp_i d2u/dr2
    SplineDerivs[i+0][2] = DeltaRInv * DeltaRInv * (d2A[ 2]*tp[2] + d2A[ 3]*tp[3]);
    SplineDerivs[i+1][2] = DeltaRInv * DeltaRInv * (d2A[ 6]*tp[2] + d2A[ 7]*tp[3]);
    SplineDerivs[i+2][2] = DeltaRInv * DeltaRInv * (d2A[10]*tp[2] + d2A[11]*tp[3]);
    SplineDerivs[i+3][2] = DeltaRInv * DeltaRInv * (d2A[14]*tp[2] + d2A[15]*tp[3]);
    int imin=std::max(i,1);
    int imax=std::min(i+4,NumParams+1);
    for (int n=imin; n<imax; ++n)
      derivs[n-1] = SplineDerivs[n];
    derivs[1]+=SplineDerivs[0];
    //real_type v[4],dv[4],d2v[4];
    //v[0] = A[ 0]*tp[0] + A[ 1]*tp[1] + A[ 2]*tp[2] + A[ 3]*tp[3];
    //v[1] = A[ 4]*tp[0] + A[ 5]*tp[1] + A[ 6]*tp[2] + A[ 7]*tp[3];
    //v[2] = A[ 8]*tp[0] + A[ 9]*tp[1] + A[10]*tp[2] + A[11]*tp[3];
    //v[3] = A[12]*tp[0] + A[13]*tp[1] + A[14]*tp[2] + A[15]*tp[3];
    //// d/dp_i du/dr
    //dv[0] = DeltaRInv * (dA[ 1]*tp[1] + dA[ 2]*tp[2] + dA[ 3]*tp[3]);
    //dv[1] = DeltaRInv * (dA[ 5]*tp[1] + dA[ 6]*tp[2] + dA[ 7]*tp[3]);
    //dv[2] = DeltaRInv * (dA[ 9]*tp[1] + dA[10]*tp[2] + dA[11]*tp[3]);
    //dv[3] = DeltaRInv * (dA[13]*tp[1] + dA[14]*tp[2] + dA[15]*tp[3]);
    //// d/dp_i d2u/dr2
    //d2v[0] = DeltaRInv * DeltaRInv * (d2A[ 2]*tp[2] + d2A[ 3]*tp[3]);
    //d2v[1] = DeltaRInv * DeltaRInv * (d2A[ 6]*tp[2] + d2A[ 7]*tp[3]);
    //d2v[2] = DeltaRInv * DeltaRInv * (d2A[10]*tp[2] + d2A[11]*tp[3]);
    //d2v[3] = DeltaRInv * DeltaRInv * (d2A[14]*tp[2] + d2A[15]*tp[3]);
    //int imin=std::max(i,1);
    //int imax=std::min(i+4,NumParams+1)-1;
    //int n=imin-1, j=imin-i;
    //while(n<imax && j<4)
    //{
    //  derivs[n] = TinyVector<real_type,3>(v[j],dv[j],d2v[j]);
    //  n++; j++;
    //}
    //if(i==0) derivs[1]+= TinyVector<real_type,3>(v[0],dv[0],d2v[0]);
    return true;
  }

  // du/dParameters only (value derivatives), written into derivs.
  inline bool
  evaluateDerivatives(real_type r, std::vector<real_type>& derivs)
  {
    if (r >= cutoff_radius)
      return false;
    real_type tp[4],v[4],ipart,t;
    t = std::modf(r*DeltaRInv, &ipart);
    tp[0] = t*t*t;
    tp[1] = t*t;
    tp[2] = t;
    tp[3] = 1.0;
    v[0] = A[ 0]*tp[0] + A[ 1]*tp[1] + A[ 2]*tp[2] + A[ 3]*tp[3];
    v[1] = A[ 4]*tp[0] + A[ 5]*tp[1] + A[ 6]*tp[2] + A[ 7]*tp[3];
    v[2] = A[ 8]*tp[0] + A[ 9]*tp[1] + A[10]*tp[2] + A[11]*tp[3];
    v[3] = A[12]*tp[0] + A[13]*tp[1] + A[14]*tp[2] + A[15]*tp[3];
    int i = (int) ipart;
    int imin=std::max(i,1);
    int imax=std::min(i+4,NumParams+1)-1;
    int n=imin-1, j=imin-i;
    while(n<imax && j<4)
    {
      derivs[n] = v[j];
      n++; j++;
    }
    // cusp-condition contribution of the first coefficient
    if(i==0) derivs[1]+= v[0];
    return true;
  }

  inline real_type f(real_type r)
  {
    if (r>=cutoff_radius)
      return 0.0;
    return evaluate(r);
  }

  inline real_type df(real_type r)
  {
    if (r>=cutoff_radius)
      return 0.0;
    real_type du, d2u;
    evaluate(r, du, d2u);
    return du;
  }

  // Parse <coefficients .../> from XML: size/rcut attributes, then the
  // parameter array.  If the array length differs from "size", the old
  // curve is resampled and refit to the new parameter count.
  // Returns true iff any parameter is nonzero.
  bool put(xmlNodePtr cur)
  {
    ReportEngine PRE("BsplineFunctor","put(xmlNodePtr)");
    //CuspValue = -1.0e10;
    NumParams = 0;
    //cutoff_radius = 0.0;
    OhmmsAttributeSet rAttrib;
    real_type radius = -1.0;
    rAttrib.add(NumParams,   "size");
    rAttrib.add(radius,      "rcut");
    rAttrib.add(radius,      "cutoff");
    rAttrib.put(cur);
    if (radius < 0.0)
      if (periodic)
        app_log() << " Jastrow cutoff unspecified. Setting to Wigner-Seitz radius = " << cutoff_radius << ".\n";
      else
      {
        APP_ABORT(" Jastrow cutoff unspecified. Cutoff must be given when using open boundary conditions");
      }
    else if (periodic && radius > cutoff_radius)
    {
      if (radius - cutoff_radius > 1e-4)
      {
        APP_ABORT( " The Jastrow cutoff specified should not be larger than Wigner-Seitz radius.");
      }
      else
      {
        app_log() << " The Jastrow cutoff specified is slightly larger than the Wigner-Seitz radius.";
        app_log() << " Setting to Wigner-Seitz radius = " << cutoff_radius << ".\n";
      }
    }
    else
      cutoff_radius = radius;
    if (NumParams == 0)
    {
      PRE.error("You must specify a positive number of parameters for the Bspline jastrow function.",true);
    }
    app_log() << " size = " << NumParams << " parameters " << std::endl;
    app_log() << " cusp = " << CuspValue << std::endl;
    app_log() << " rcut = " << cutoff_radius << std::endl;
    resize(NumParams);
    // Now read coefficents
    xmlNodePtr xmlCoefs = cur->xmlChildrenNode;
    while (xmlCoefs != NULL)
    {
      std::string cname((const char*)xmlCoefs->name);
      if (cname == "coefficients")
      {
        std::string type("0"), id("0");
        std::string optimize("yes");
        OhmmsAttributeSet cAttrib;
        cAttrib.add(id, "id");
        cAttrib.add(type, "type");
        cAttrib.add(optimize, "optimize");
        cAttrib.put(xmlCoefs);
        if (type != "Array")
        {
          PRE.error("Unknown correlation type " + type + " in BsplineFunctor." + "Resetting to \"Array\"");
          xmlNewProp(xmlCoefs, (const xmlChar*) "type", (const xmlChar*) "Array");
        }
        std::vector<real_type> params;
        putContent(params, xmlCoefs);
        if (params.size() == NumParams)
          Parameters = params;
        else
        {
          app_log() << "Changing number of Bspline parameters from " << params.size() << " to " << NumParams << ". Performing fit:\n";
          // Fit function to new number of parameters
          const int numPoints = 500;
          BsplineFunctor<T> tmp_func(CuspValue);
          tmp_func.cutoff_radius = cutoff_radius;
          tmp_func.resize(params.size());
          tmp_func.Parameters = params;
          tmp_func.reset();
          std::vector<real_type> y(numPoints);
          Matrix<real_type> basis(numPoints,NumParams);
          std::vector<TinyVector<real_type,3> > derivs(NumParams);
          for (int i=0; i<numPoints; i++)
          {
            real_type r = (real_type)i / (real_type)numPoints * cutoff_radius;
            y[i] = tmp_func.evaluate(r);
            evaluateDerivatives(r, derivs);
            for (int j=0; j<NumParams; j++)
              basis(i,j) = derivs[j][0];
          }
          resize(NumParams);
          LinearFit(y, basis, Parameters);
          app_log() << "New parameters are:\n";
          for (int i=0; i < Parameters.size(); i++)
            app_log() << " " << Parameters[i] << std::endl;
        }
        if(optimize == "yes")
        {
          notOpt=false;
        }
        else
        {
          notOpt=true;
        }
        for (int i=0; i< NumParams; i++)
        {
          std::stringstream sstr;
          sstr << id << "_" << i;
          myVars.insert(sstr.str(),Parameters[i],!notOpt,optimize::LOGLINEAR_P);
        }
        app_log() << "Parameter Name Value\n";
        myVars.print(app_log());
      }
      xmlCoefs = xmlCoefs->next;
    }
    reset();
    real_type zeros=0;
    for (int i=0; i< NumParams; i++)
      zeros+=Parameters[i]*Parameters[i];
    return zeros>1.0e-12; //true if Parameters are not zero
  }

  // Initialize directly from tabulated (x, y) samples by least-squares fit.
  void initialize(int numPoints, std::vector<real_type>& x, std::vector<real_type>& y
                  , real_type cusp, real_type rcut, std::string& id,
                  std::string& optimize )
  {
    ReportEngine PRE("BsplineFunctor","initialize");
    NumParams = numPoints;
    cutoff_radius = rcut;
    CuspValue = cusp;
    if (NumParams == 0)
    {
      PRE.error("You must specify a positive number of parameters for the Bspline jastrow function.",true);
    }
    app_log() << "Initializing BsplineFunctor from array. \n";
    app_log() << " size = " << NumParams << " parameters " << std::endl;
    app_log() << " cusp = " << CuspValue << std::endl;
    app_log() << " rcut = " << cutoff_radius << std::endl;
    resize(NumParams);
    int npts = x.size();
    Matrix<real_type> basis(npts,NumParams);
    std::vector<TinyVector<real_type,3> > derivs(NumParams);
    for (int i=0; i<npts; i++)
    {
      real_type r = x[i];
      if (r > cutoff_radius)
      {
        PRE.error("Error in BsplineFunctor::initialize: r > cutoff_radius.",true);
      }
      evaluateDerivatives(r, derivs);
      for (int j=0; j<NumParams; j++)
        basis(i,j) = derivs[j][0];
    }
    resize(NumParams);
    LinearFit(y, basis, Parameters);
    app_log() << "New parameters are:\n";
    for (int i=0; i < Parameters.size(); i++)
      app_log() << " " << Parameters[i] << std::endl;
#if QMC_BUILD_LEVEL < 5
    if(optimize == "yes")
    {
      // Setup parameter names
      for (int i=0; i< NumParams; i++)
      {
        std::stringstream sstr;
        sstr << id << "_" << i;
        myVars.insert(sstr.str(),Parameters[i],true,optimize::LOGLINEAR_P);
      }
      app_log() << "Parameter Name Value\n";
      myVars.print(app_log());
    }
    else
#endif
    {
      notOpt=true;
      app_log() << "Parameters of BsplineFunctor id:" <<id <<" are not being optimized.\n";
    }
    reset();
  }

  void reportStatus(std::ostream& os)
  {
    if (notOpt)
      return;
    myVars.print(os);
  }

  void checkOutVariables(const opt_variables_type& active)
  {
    if (notOpt)
      return;
    myVars.getIndex(active);
  }

  void checkInVariables(opt_variables_type& active)
  {
    if (notOpt)
      return;
    active.insertFrom(myVars);
  }

  // Pull updated values from the optimizer and rebuild the spline.
  void resetParameters(const opt_variables_type& active)
  {
    if (notOpt)
      return;
    for (int i=0; i<Parameters.size(); ++i)
    {
      int loc=myVars.where(i);
      if (loc>=0)
        Parameters[i]=myVars[i]=active[loc];
    }
    //         if (ResetCount++ == 100)
    //         {
    //           ResetCount = 0;
    //           if(ReportLevel) print();
    //         }
    reset();
  }

  // check if this object has active optimizable parameters
  bool isOptimizable()
  {
    if (notOpt)
      return false;
    for (int i=0; i<Parameters.size(); ++i)
    {
      int loc=myVars.where(i);
      if (loc>=0)
        return true;
    }
    return false;
  }

};

// Sum of u(r_j) over a particle range.  First compresses distances below
// the cutoff (skipping the reference atom iat), then evaluates the spline
// over the compressed list with a SIMD reduction.
template<typename T> inline T
BsplineFunctor<T>::evaluateV(const int iat, const int iStart, const int iEnd,
                             const T* restrict _distArray, T* restrict distArrayCompressed ) const
{
  const real_type* restrict distArray = _distArray + iStart;
  ASSUME_ALIGNED(distArrayCompressed);
  int iCount = 0;
  const int iLimit = iEnd-iStart;
#pragma vector always
  for ( int jat = 0; jat < iLimit; jat++ )
  {
    real_type r = distArray[jat];
    // pick the distances smaller than the cutoff and avoid the reference atom
    if ( r < cutoff_radius && iStart+jat != iat )
      distArrayCompressed[iCount++] = distArray[jat];
  }
  real_type d = 0.0;
#pragma omp simd reduction (+:d)
  for ( int jat = 0; jat < iCount; jat++ )
  {
    real_type r = distArrayCompressed[jat];
    r *= DeltaRInv;
    int i = (int)r;
    real_type t = r - real_type(i);
    real_type tp0 = t*t*t;
    real_type tp1 = t*t;
    real_type tp2 = t;
    real_type d1 = SplineCoefs[i+0]*(A[ 0]*tp0 + A[ 1]*tp1 + A[ 2]*tp2 + A[ 3]);
    real_type d2 = SplineCoefs[i+1]*(A[ 4]*tp0 + A[ 5]*tp1 + A[ 6]*tp2 + A[ 7]);
    real_type d3 = SplineCoefs[i+2]*(A[ 8]*tp0 + A[ 9]*tp1 + A[10]*tp2 + A[11]);
    real_type d4 = SplineCoefs[i+3]*(A[12]*tp0 + A[13]*tp1 + A[14]*tp2 + A[15]);
    d += ( d1 + d2 + d3 + d4 );
  }
  return d;
}

// Value, gradient/r, and laplacian for a particle range.  Distances below
// the cutoff are compressed together with their indices, then results are
// scattered back to the original positions.
template<typename T> inline void
BsplineFunctor<T>::evaluateVGL(const int iat, const int iStart, const int iEnd,
                               const T* _distArray,
                               T* restrict _valArray,
                               T* restrict _gradArray,
                               T* restrict _laplArray,
                               T* restrict distArrayCompressed, int* restrict distIndices ) const
{
  real_type dSquareDeltaRinv = DeltaRInv * DeltaRInv;
  constexpr real_type cZero(0);
  constexpr real_type cOne(1);
  constexpr real_type cMOne(-1);
  //    START_MARK_FIRST();
  ASSUME_ALIGNED(distIndices);
  ASSUME_ALIGNED(distArrayCompressed);
  int iCount = 0;
  int iLimit = iEnd-iStart;
  const real_type* distArray = _distArray + iStart;
  real_type* valArray = _valArray + iStart;
  real_type* gradArray = _gradArray + iStart;
  real_type* laplArray = _laplArray + iStart;
#pragma vector always
  for ( int jat = 0; jat < iLimit; jat++ )
  {
    real_type r = distArray[jat];
    if ( r < cutoff_radius && iStart+jat != iat )
    {
      distIndices[iCount] = jat;
      distArrayCompressed[iCount] = r;
      iCount++;
    }
  }
#pragma omp simd
  for ( int j = 0; j < iCount; j++ )
  {
    real_type r = distArrayCompressed[j];
    int iScatter = distIndices[j];
    real_type rinv = cOne/r;
    r *= DeltaRInv;
    int iGather = (int)r;
    real_type t = r - real_type(iGather);
    real_type tp0 = t*t*t;
    real_type tp1 = t*t;
    real_type tp2 = t;
    real_type sCoef0 = SplineCoefs[iGather+0];
    real_type sCoef1 = SplineCoefs[iGather+1];
    real_type sCoef2 = SplineCoefs[iGather+2];
    real_type sCoef3 = SplineCoefs[iGather+3];
    laplArray[iScatter] = dSquareDeltaRinv *
                          (sCoef0*( d2A[ 2]*tp2 + d2A[ 3])+
                           sCoef1*( d2A[ 6]*tp2 + d2A[ 7])+
                           sCoef2*( d2A[10]*tp2 + d2A[11])+
                           sCoef3*( d2A[14]*tp2 + d2A[15]));
    gradArray[iScatter] = DeltaRInv * rinv *
                          (sCoef0*( dA[ 1]*tp1 + dA[ 2]*tp2 + dA[ 3])+
                           sCoef1*( dA[ 5]*tp1 + dA[ 6]*tp2 + dA[ 7])+
                           sCoef2*( dA[ 9]*tp1 + dA[10]*tp2 + dA[11])+
                           sCoef3*( dA[13]*tp1 + dA[14]*tp2 + dA[15]));
    valArray[iScatter] = (sCoef0*(A[ 0]*tp0 + A[ 1]*tp1 + A[ 2]*tp2 + A[ 3])+
                          sCoef1*(A[ 4]*tp0 + A[ 5]*tp1 + A[ 6]*tp2 + A[ 7])+
                          sCoef2*(A[ 8]*tp0 + A[ 9]*tp1 + A[10]*tp2 + A[11])+
                          sCoef3*(A[12]*tp0 + A[13]*tp1 + A[14]*tp2 + A[15]));
  }
}
}
#endif
ordering_op-inl.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * Copyright (c) 2016 by Contributors
 * \file ordering_op-inl.h
 * \brief Function definition of matrix related operators
 */
#ifndef MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
#define MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_

#include <mxnet/operator_util.h>
#include <dmlc/optional.h>
#include <mshadow/tensor.h>
#include <algorithm>
#include <vector>
#include <type_traits>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "./sort_op.h"
#include "./indexing_op.h"

namespace mshadow {
// View a contiguous tensor under a different shape without copying;
// the returned tensor aliases src's memory.
template<typename xpu, int src_dim, typename DType, int dst_dim>
inline Tensor<xpu, dst_dim, DType> inplace_reshape(Tensor<xpu, src_dim, DType> src,
                                                   Shape<dst_dim> target_shape) {
  CHECK_EQ(src.CheckContiguous(), true);
  return Tensor<xpu, dst_dim, DType>(src.dptr_, target_shape, src.stream_);
}
};

namespace mxnet {
namespace op {
// These enums are only visible within this header
namespace topk_enum {
enum TopKReturnType {kReturnValue, kReturnIndices, kReturnMask, kReturnBoth};
} // topk_enum

// Operator parameters for topk.
struct TopKParam : public dmlc::Parameter<TopKParam> {
  dmlc::optional<int> axis;
  int k;
  int ret_typ;
  bool is_ascend;
  DMLC_DECLARE_PARAMETER(TopKParam) {
    DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
    .describe("Axis along which to choose the top k indices."
              " If not given, the flattened array is used. Default is -1.");
    DMLC_DECLARE_FIELD(k).set_default(1)
    .describe("Number of top elements to select,"
              " should be always smaller than or equal to the element number in the given axis."
              " A global sort is performed if set k < 1.");
    DMLC_DECLARE_FIELD(ret_typ).set_default(topk_enum::kReturnIndices)
    .add_enum("value", topk_enum::kReturnValue)
    .add_enum("indices", topk_enum::kReturnIndices)
    .add_enum("mask", topk_enum::kReturnMask)
    .add_enum("both", topk_enum::kReturnBoth)
    .describe("The return type.\n"
              " \"value\" means to return the top k values,"
              " \"indices\" means to return the indices of the top k values,"
              " \"mask\" means to return a mask array containing 0 and 1. 1 means the top k values."
              " \"both\" means to return a list of both values and indices of top k elements.")
;
    DMLC_DECLARE_FIELD(is_ascend).set_default(false)
    .describe("Whether to choose k largest or k smallest elements."
              " Top K largest elements will be chosen if set to false.");
  }
};

// Operator parameters for sort.
struct SortParam : public dmlc::Parameter<SortParam> {
  dmlc::optional<int> axis;
  bool is_ascend;
  DMLC_DECLARE_PARAMETER(SortParam) {
    DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
    .describe("Axis along which to choose sort the input tensor."
              " If not given, the flattened array is used. Default is -1.");
    DMLC_DECLARE_FIELD(is_ascend).set_default(true)
    .describe("Whether to sort in ascending or descending order.");
  }
};

// Operator parameters for argsort.
struct ArgSortParam : public dmlc::Parameter<ArgSortParam> {
  dmlc::optional<int> axis;
  bool is_ascend;
  DMLC_DECLARE_PARAMETER(ArgSortParam) {
    DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
    .describe("Axis along which to sort the input tensor."
              " If not given, the flattened array is used. Default is -1.");
    DMLC_DECLARE_FIELD(is_ascend).set_default(true)
    .describe("Whether to sort in ascending or descending order.");
  }
};

// Normalize TopKParam against the source shape: resolves a possibly
// negative/absent axis, derives batch_size (product of the other axes),
// element_num (length of the sort axis), whether a transpose is needed
// (axis not innermost), the effective k (k<=0 means sort everything),
// and the output target_shape.
inline void ParseTopKParam(const TShape& src_shape, const TopKParam& param, TShape *target_shape,
                           int *batch_size, int *element_num, int *axis, int *k,
                           bool *do_transpose, bool *is_ascend) {
  *do_transpose = false;
  *k = param.k;
  *is_ascend = param.is_ascend;
  // get batch_size, axis and element_num
  if (!static_cast<bool>(param.axis)) {  // No axis given
    *axis = 0;
    *batch_size = 1;
    *element_num = src_shape.Size();
  } else {
    *axis = param.axis.value();
    if (*axis < 0) {
      *axis += src_shape.ndim();
    }
    CHECK(*axis >= 0 && *axis < static_cast<int>(src_shape.ndim()))
      << "Invalid axis! axis should be between 0 and " << src_shape.ndim()
      << ", found axis=" << *axis;
    *batch_size = src_shape.Size() / src_shape[*axis];
    *element_num = src_shape[*axis];
    if (*axis != static_cast<int>(src_shape.ndim()) - 1) {
      *do_transpose = true;
    }
  }
  // get k
  if (param.k <= 0) {
    *k = *element_num;
  }
  // get target_shape
  if (!static_cast<bool>(param.axis)) {
    if (param.ret_typ != topk_enum::kReturnMask) {
      *target_shape = mshadow::Shape1(*k);
    } else {
      *target_shape = src_shape;
    }
  } else {
    *target_shape = src_shape;
    if (param.ret_typ != topk_enum::kReturnMask) {
      (*target_shape)[*axis] = *k;
    }
  }
  CHECK(*k >= 1 && *k <= *element_num) << "k must be smaller than "
                                       << *element_num << ", get k = " << *k;
}

using namespace mshadow;

// Batched top-k selection: reorders ind so its first K entries per batch
// index the top-K values of dat, and writes those values to the front of
// each batch segment of dat.  Specialized per device below.
template<typename xpu>
void TopKSort(const Tensor<xpu, 1, real_t>& dat,
              const Tensor<xpu, 1, int>& ind,
              const Tensor<xpu, 1, char>& work,
              int K, int N, bool is_ascend,
              Stream<xpu> *s);

// CPU implementation: per-batch std::sort / std::partial_sort of the index
// array by value, parallelized over batches with OpenMP.
template<>
MSHADOW_FORCE_INLINE void TopKSort<cpu>(const Tensor<cpu, 1, real_t>& dat,
                                        const Tensor<cpu, 1, int>& ind,
                                        const Tensor<cpu, 1, char>& work,
                                        int K, int N, bool is_ascend,
                                        Stream<cpu> *s) {
  // Use full sort when K is relatively large.
  const bool full_sort(K*8 > N);
  // Batch size.
  const int M(dat.size(0)/N);
  const int omp_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount());
  #pragma omp parallel for num_threads(omp_threads)
  for (int i = 0; i < M; ++i) {
    // indices hold global offsets into dat, so vals is the whole buffer
    real_t *vals = dat.dptr_;
    int *indices = ind.dptr_+i*N;
    if (is_ascend) {
      if (full_sort) {
        std::sort(indices, indices+N,
                  [&](const int& i1, const int& i2){ return vals[i1] < vals[i2]; });
      } else {
        std::partial_sort(indices, indices+K, indices+N,
                          [&](const int& i1, const int& i2){ return vals[i1] < vals[i2]; });
      }
    } else {
      if (full_sort) {
        std::sort(indices, indices+N,
                  [&](const int& i1, const int& i2){ return vals[i1] > vals[i2]; });
      } else {
        std::partial_sort(indices, indices+K, indices+N,
                          [&](const int& i1, const int& i2){ return vals[i1] > vals[i2]; });
      }
    }
    // gather top-K values through a scratch buffer, then compact them to
    // the front of this batch's segment (avoids overwriting while reading)
    real_t *buff = reinterpret_cast<real_t*>(work.dptr_)+i*K;
    for (int j = 0; j < K; ++j) {
      buff[j] = vals[indices[j]];
    }
    std::copy(buff, buff+K, &vals[i*N]);
  }
}

#ifdef __CUDACC__
// True iff (val1, ind1) ranks ahead of (val2, ind2) for the requested order.
template<typename DType>
MSHADOW_XINLINE bool TopKCompare(DType val1, int ind1, DType val2, int ind2, bool is_ascend) {
  // Negative indices denote undefined values which are considered arbitrary small resp. large.
  return (ind2 < 0) || (ind1 >= 0 && ((is_ascend && val1 < val2) ||
                                      (!is_ascend && val1 > val2)));
}

// Merge two sorted top-K lists; result is left in val1/ind1.
template<typename DType>
MSHADOW_XINLINE void MergeTopK(int K, DType *val1, int *ind1, DType *val2, int *ind2,
                               bool is_ascend) {
  // In-place merge of two sorted top-K lists into val1/ind1. First determine the intervals
  // [0,..,i1], [0,..i2] of the two lists that will be part of the merged list.
  int i1(K-1), i2(K-1);
  for (int i = 0; i < K; ++i) {
    if (TopKCompare(val1[i1], ind1[i1], val2[i2], ind2[i2], is_ascend)) {
      --i2;
    } else {
      --i1;
    }
  }
  // Now merge the lists from back to front.
  for (int i = K; i--;) {
    if (i2 < 0 || i1 >= 0 && TopKCompare(val2[i2], ind2[i2], val1[i1], ind1[i1], is_ascend)) {
      val1[i] = val1[i1];
      ind1[i] = ind1[i1];
      --i1;
    } else {
      val1[i] = val2[i2];
      ind1[i] = ind2[i2];
      --i2;
    }
  }
}

// One thread block per batch item; each thread keeps a sorted top-K list in
// shared memory (insertion sort, small K), then lists are merged pairwise.
template<typename DType>
__global__ void PartialSortSmallK(int K, int N, DType *val, int *ind, bool is_ascend) {
  // Buffer for pairwise reduction.
  extern __shared__ int buff[];
  // Start of buffer sections associated with this thread.
  const int offset(threadIdx.x*K);
  int *ind_buff = &buff[offset];
  DType *val_buff = reinterpret_cast<DType*>(&buff[blockDim.x*K])+offset;
  // Initialize top-K values for this thread.
  for (int i = 0; i < K; ++i) {
    ind_buff[i] = -1;
  }
  // Range of values this thread cares about. Each thread block processes
  // a different batch item (i.e. a different set of ind/val where we
  // have to select the top-K elements). All threads within the same
  // block work on the same batch item.
  const int first(blockIdx.x*N+threadIdx.x), last((blockIdx.x+1)*N);
  // Select top-K from this range and store it sorted in the buffer.
  // We assume a small K, so linear insertion is o.k.
  for (int i = first; i < last; i += blockDim.x) {
    DType cur_val(val[i]);
    int cur_ind(ind[i]);
    for (int j = K; j-- && TopKCompare(cur_val, cur_ind,
                                       val_buff[j], ind_buff[j], is_ascend); ) {
      if (j+1 < K) {
        val_buff[j+1] = val_buff[j];
        ind_buff[j+1] = ind_buff[j];
      }
      val_buff[j] = cur_val;
      ind_buff[j] = cur_ind;
    }
  }
  // Recursive merge of sorted lists for this thread block. Note that blockDim.x is not
  // necessary a power of two, therefore the additional checks for last_s.
  for (unsigned int s = (blockDim.x+1)/2, last_s = blockDim.x;
       last_s > 1; last_s = s, s = (s+1)/2) {
    __syncthreads();
    if (threadIdx.x < s && threadIdx.x+s < last_s) {
      MergeTopK(K, val_buff, ind_buff, val_buff+s*K, ind_buff+s*K, is_ascend);
    }
  }
  // Final updates on master thread.
  if (threadIdx.x == 0) {
    for (int i = 0; i < K; ++i) {
      ind[blockIdx.x*N+i] = ind_buff[i];
      val[blockIdx.x*N+i] = val_buff[i];
    }
  }
}

// GPU implementation: full SortByKey-based sort for larger K, or the
// shared-memory PartialSortSmallK kernel for K <= 5.
template<>
MSHADOW_FORCE_INLINE void TopKSort<gpu>(const Tensor<gpu, 1, real_t>& dat,
                                        const Tensor<gpu, 1, int>& ind,
                                        const Tensor<gpu, 1, char>& work,
                                        int K, int N, bool is_ascend,
                                        Stream<gpu> *s) {
  // Use full sort for all but very small K for which we
  // can do a partial sort entirely within shared memory.
  const bool full_sort(K > 5);
  // Batch size.
  const int M(dat.size(0)/N);
  if (full_sort) {
    // Divide workspace into two parts. The first one is needed to store batch ids.
    const int id_size(sizeof(int)*ind.size(0));
    Tensor<gpu, 1, int> batch_id(reinterpret_cast<int*>(work.dptr_), Shape1(ind.size(0)), s);
    Tensor<gpu, 1, char> sort_work(work.dptr_+id_size, Shape1(work.size(0)-id_size), s);
    mxnet::op::SortByKey(dat, ind, is_ascend, &sort_work);
    if (M > 1) {
      // Back to back sorting. Note that mxnet::op::SortByKey is a stable sort.
      batch_id = ind / N;
      mxnet::op::SortByKey(batch_id, dat, true, &sort_work);
      batch_id = ind / N;
      mxnet::op::SortByKey(batch_id, ind, true, &sort_work);
    }
  } else {
    const int nthreads(mshadow::cuda::kBaseThreadNum);
    PartialSortSmallK<<<M, nthreads, nthreads*K*(sizeof(int)+sizeof(real_t)),
                        mshadow::Stream<gpu>::GetStream(s)>>>
      (K, N, dat.dptr_, ind.dptr_, is_ascend);
  }
}
#endif

/*!
   * \brief Implementation of the TopK operation
   *
   *
   * \param ctx the running context
   * \param resource temporary resource handler
   * \param src the Source blob
   * \param ret the destination blobs
   * \param k the K elements to keep
   * \param param the topk parameters
   * \tparam xpu the device type.
   */
// NOTE(review): the body of TopKImpl continues beyond this chunk.
template<typename xpu>
void TopKImpl(RunContext ctx,
              Resource resource,
              const TBlob& src,
              const std::vector<TBlob>& ret,
              const TopKParam& param) {
  using namespace mshadow;
  using namespace mshadow::expr;
  for (auto ret_ele : ret) {
    CHECK_EQ(ret_ele.type_flag_, src.type_flag_);
  }
  // 1. Parse and initialize information
  Stream<xpu> *s = ctx.get_stream<xpu>();
  Tensor<xpu, 1, char> workspace;
  Tensor<xpu, 1, char> temp_workspace;
  Tensor<xpu, 1, real_t> sorted_dat;
  Tensor<xpu, 1, int> indices, sel_indices;
  Tensor<xpu, 2, real_t> mask_val;
  int batch_size, element_num;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  int k = 0;
  TShape target_shape;
  ParseTopKParam(src.shape_, param,
                 &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
  Tensor<xpu, 3, real_t> dat = src.FlatTo3D<xpu, real_t>(axis, axis, s);
  size_t temp_size = 0;
  // Temp space needed by the gpu-based full sorts.
  temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize<int, int, xpu>(src.Size()));
  temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize<int, real_t, xpu>(src.Size()));
  temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize<real_t, int, xpu>(src.Size()));
  // Additional temp space for gpu full sorts for batch ids.
  temp_size += sizeof(int) * src.Size();
  // Temp space for cpu sorts.
  temp_size = std::max(temp_size, sizeof(real_t) * src.Size());
  size_t workspace_size = temp_size + sizeof(real_t) * src.Size() + sizeof(int) * src.Size();
  if (param.ret_typ == topk_enum::kReturnMask) {
    workspace_size += sizeof(int) * batch_size * k + sizeof(real_t) * batch_size * k;
  }
  workspace = resource.get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
  char* workspace_curr_ptr = workspace.dptr_;
  sorted_dat = Tensor<xpu, 1, real_t>(reinterpret_cast<real_t*>(workspace_curr_ptr),
                                      Shape1(src.Size()), s);  // contain sorted dat
  workspace_curr_ptr += sizeof(real_t) * src.Size();
  indices = Tensor<xpu, 1, int>(reinterpret_cast<int*>(workspace_curr_ptr),
                                Shape1(src.Size()), s);  // indices in the original matrix
  workspace_curr_ptr += sizeof(int) * src.Size();
  if (do_transpose) {
    sorted_dat = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
  } else {
    sorted_dat = reshape(dat, Shape1(src.Size()));
  }
  mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size * element_num, 1, 0, 1,
                                           kWriteTo, indices.dptr_);
  CHECK_EQ(sorted_dat.CheckContiguous(), true);
  CHECK_EQ(indices.CheckContiguous(), true);
  if (param.ret_typ == topk_enum::kReturnMask) {
    sel_indices = Tensor<xpu, 1, int>(reinterpret_cast<int*>(workspace_curr_ptr),
                                      Shape1(batch_size * k), s);
    workspace_curr_ptr += sizeof(int) * batch_size * k;
    mask_val = Tensor<xpu, 2, real_t>(reinterpret_cast<real_t*>(workspace_curr_ptr),
                                      Shape2(batch_size * k, 1), s);
    workspace_curr_ptr += sizeof(real_t) * batch_size * k;
    mask_val = scalar<real_t>(1);
    CHECK_EQ(sel_indices.CheckContiguous(), true);
    CHECK_EQ(mask_val.CheckContiguous(), true);
  }
  temp_workspace = Tensor<xpu, 1, char>(workspace_curr_ptr, Shape1(temp_size), s);  // temp space
  workspace_curr_ptr += temp_size;
  // 2. Perform inplace batch sort.
// After sorting, each batch in `sorted_dat` will be sorted in the corresponding order // up to the k-th element and the `indices` will contain the corresponding index in `sorted_dat` TopKSort(sorted_dat, indices, temp_workspace, k, element_num, is_ascend, s); // 3. Assign results to the ret blob if (param.ret_typ == topk_enum::kReturnMask) { Tensor<xpu, 2, real_t> ret_mask = ret[0].get_with_shape<xpu, 2, real_t>(Shape2(ret[0].Size(), 1), s); ret_mask = scalar<real_t>(0); sel_indices = reshape(slice<1>( inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), Shape1(batch_size * k)); if (do_transpose) { TShape src_shape = src.shape_.FlatTo3D(axis); CHECK_EQ(sel_indices.CheckContiguous(), true); sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]), Shape3(0, 2, 1)); } IndexFill(ret_mask, sel_indices, mask_val); } else if (param.ret_typ == topk_enum::kReturnIndices) { indices = F<mshadow_op::mod>(indices, element_num); if (do_transpose) { Tensor<xpu, 3, real_t> ret_indices = ret[0].FlatTo3D<xpu, real_t>(axis, axis, s); ret_indices = tcast<real_t>(transpose( slice<2>(inplace_reshape(indices, Shape3(ret_indices.shape_[0], ret_indices.shape_[2], element_num)), 0, k), Shape3(0, 2, 1))); } else { Tensor<xpu, 2, real_t> ret_indices = ret[0].get_with_shape<xpu, 2, real_t>(Shape2(batch_size, k), s); ret_indices = tcast<real_t>(slice<1>( inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k)); } } else { indices = F<mshadow_op::mod>(indices, element_num); if (do_transpose) { Tensor<xpu, 3, real_t> ret_value = ret[0].FlatTo3D<xpu, real_t>(axis, axis, s); Tensor<xpu, 3, real_t> ret_indices = ret[1].FlatTo3D<xpu, real_t>(axis, axis, s); ret_value = transpose( slice<2>(inplace_reshape(sorted_dat, Shape3(ret_value.shape_[0], ret_value.shape_[2], element_num)), 0, k), Shape3(0, 2, 1)); ret_indices = tcast<real_t>(transpose( slice<2>(inplace_reshape(indices, Shape3(ret_indices.shape_[0], ret_indices.shape_[2], 
element_num)), 0, k), Shape3(0, 2, 1))); } else { Tensor<xpu, 2, real_t> ret_value = ret[0].get_with_shape<xpu, 2, real_t>(Shape2(batch_size, k), s); Tensor<xpu, 2, real_t> ret_indices = ret[1].get_with_shape<xpu, 2, real_t>(Shape2(batch_size, k), s); ret_value = slice<1>(inplace_reshape(sorted_dat, Shape2(batch_size, element_num)), 0, k); ret_indices = tcast<real_t>(slice<1>( inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k)); } } } template<typename xpu> void TopK(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed); // TODO(sxjscience) We can support inplace in the future CHECK_EQ(req[0], kWriteTo) << "TopK does not support inplace"; TopKImpl<xpu>(ctx.run_ctx, ctx.requested[0], inputs[0], outputs, param); } template<typename xpu> void Sort(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { const SortParam& param = nnvm::get<SortParam>(attrs.parsed); CHECK_EQ(req[0], kWriteTo) << "Sort does not support inplace"; TopKParam topk_param; topk_param.axis = param.axis; topk_param.is_ascend = param.is_ascend; topk_param.k = 0; topk_param.ret_typ = topk_enum::kReturnValue; TopKImpl<xpu>(ctx.run_ctx, ctx.requested[0], inputs[0], outputs, topk_param); } template<typename xpu> void ArgSort(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed); CHECK_EQ(req[0], kWriteTo) << "ArgSort does not support inplace"; TopKParam topk_param; topk_param.axis = param.axis; topk_param.is_ascend = param.is_ascend; topk_param.k = 0; topk_param.ret_typ = topk_enum::kReturnIndices; TopKImpl<xpu>(ctx.run_ctx, ctx.requested[0], inputs[0], 
outputs, topk_param); } template<typename xpu> void TopKBackward_(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_NE(req[0], kWriteInplace); using namespace mshadow; using namespace mshadow::expr; Stream<xpu> *s = ctx.run_ctx.get_stream<xpu>(); const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed); CHECK(param.ret_typ == topk_enum::kReturnValue || param.ret_typ == topk_enum::kReturnBoth); int batch_size, element_num; // number of batches + the size of each batch int axis = 0; bool do_transpose = false; bool is_ascend = false; int k = 0; TShape target_shape; ParseTopKParam(outputs[0].shape_, param, &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend); Tensor<xpu, 1, real_t> workspace = ctx.requested[0].get_space_typed<xpu, 1, real_t>(Shape1(batch_size * k * 2 + batch_size), s); Tensor<xpu, 1, real_t> sel_indices = Tensor<xpu, 1, real_t>(workspace.dptr_, Shape1(batch_size * k), s); Tensor<xpu, 1, real_t> batch_shift = Tensor<xpu, 1, real_t>(workspace.dptr_ + batch_size * k, Shape1(batch_size), s); Tensor<xpu, 1, real_t> dummy_index = Tensor<xpu, 1, real_t>(workspace.dptr_ + batch_size * k + batch_size, Shape1(batch_size * k), s); Tensor<xpu, 2, real_t> out_grad = inputs[0].get_with_shape<xpu, 2, real_t>(Shape2(inputs[0].shape_.Size(), 1), s); Tensor<xpu, 2, real_t> in_grad = outputs[0].get_with_shape<xpu, 2, real_t>(Shape2(outputs[0].shape_.Size(), 1), s); mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size, 1, 0.0f, static_cast<real_t>(element_num), kWriteTo, batch_shift.dptr_); if (do_transpose) { Tensor<xpu, 1, real_t> indices = inputs[2].FlatTo1D<xpu, real_t>(s); TShape src_shape = outputs[0].shape_.FlatTo3D(axis); sel_indices = reshape(transpose( broadcast_to(inplace_reshape(batch_shift, Shape3(src_shape[0], src_shape[2], 1)), TShape(Shape3(src_shape[0], src_shape[2], k))), Shape3(0, 2, 1)), Shape1(batch_size * 
k)); sel_indices += indices; sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]), Shape3(0, 2, 1)); } else { Tensor<xpu, 2, real_t> indices = inputs[2].get_with_shape<xpu, 2, real_t>(Shape2(batch_size, k), s); sel_indices = reshape(indices + broadcast_to(inplace_reshape(batch_shift, Shape2(batch_size, 1)), TShape(Shape2(batch_size, k))), Shape1(batch_size * k)); } CHECK_EQ(sel_indices.CheckContiguous(), true); if (kWriteTo == req[0]) { in_grad = scalar<real_t>(0); IndexFill(in_grad, sel_indices, out_grad); } else if (kAddTo == req[0]) { // TODO(sxjscience) We can use AddTakeGrad in the future. // However, the current implementation of AddTakeGrad is not so efficient. mxnet_op::Kernel<range_fwd, xpu>::Launch(s, sel_indices.shape_.Size(), 1, 0.0f, 1.0f, kWriteTo, dummy_index.dptr_); mxnet::op::AddTakeGradLargeBatch(in_grad, sel_indices, dummy_index, out_grad); } else if (kNullOp == req[0]) { return; } else { LOG(FATAL) << "Not Implemented!"; } } inline uint32_t TopKNumOutputs(const NodeAttrs& attrs) { const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed); if (param.ret_typ == topk_enum::kReturnIndices || param.ret_typ == topk_enum::kReturnMask) { return static_cast<uint32_t>(1); } else { return static_cast<uint32_t>(2); } } inline uint32_t TopKNumVisibleOutputs(const NodeAttrs& attrs) { const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed); if (param.ret_typ == topk_enum::kReturnBoth) { return static_cast<uint32_t>(2); } else { return static_cast<uint32_t>(1); } } inline bool TopKType(const nnvm::NodeAttrs& attrs, std::vector<int> *in_attrs, std::vector<int> *out_attrs) { return ElemwiseAttr<int, type_is_none, type_assign, true, type_string>( attrs, in_attrs, out_attrs, -1); } inline bool TopKShapeImpl(const TopKParam& param, std::vector<TShape> *in_attrs, std::vector<TShape> *out_attrs) { CHECK_EQ(in_attrs->size(), 1U); if (param.ret_typ == topk_enum::kReturnIndices || param.ret_typ == topk_enum::kReturnMask) { 
CHECK_EQ(out_attrs->size(), 1U); } else { CHECK_EQ(out_attrs->size(), 2U); } TShape& in_shape = (*in_attrs)[0]; int batch_size, element_num; // number of batches + the size of each batch int axis = 0; bool do_transpose = false; bool is_ascend = false; int k = 0; TShape target_shape; ParseTopKParam(in_shape, param, &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend); if (param.ret_typ == topk_enum::kReturnIndices || param.ret_typ == topk_enum::kReturnMask) { SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape); } else { SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape); SHAPE_ASSIGN_CHECK(*out_attrs, 1, target_shape); } return true; } inline bool TopKShape(const nnvm::NodeAttrs& attrs, std::vector<TShape> *in_attrs, std::vector<TShape> *out_attrs) { const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed); return TopKShapeImpl(param, in_attrs, out_attrs); } inline bool SortShape(const nnvm::NodeAttrs& attrs, std::vector<TShape> *in_attrs, std::vector<TShape> *out_attrs) { const SortParam& param = nnvm::get<SortParam>(attrs.parsed); TopKParam topk_param; topk_param.axis = param.axis; topk_param.is_ascend = param.is_ascend; topk_param.k = 0; topk_param.ret_typ = topk_enum::kReturnValue; return TopKShapeImpl(topk_param, in_attrs, out_attrs); } inline bool ArgSortShape(const nnvm::NodeAttrs& attrs, std::vector<TShape> *in_attrs, std::vector<TShape> *out_attrs) { const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed); TopKParam topk_param; topk_param.axis = param.axis; topk_param.is_ascend = param.is_ascend; topk_param.k = 0; topk_param.ret_typ = topk_enum::kReturnIndices; return TopKShapeImpl(topk_param, in_attrs, out_attrs); } } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
convolution_3x3.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Direct 3x3 stride-1 convolution (scalar path).
// Output channels are processed in parallel; each output channel is
// initialized with its bias and then accumulated over all input channels.
// The row loop computes two output rows per iteration (sum / sum2) and
// falls back to a single-row loop for an odd trailing row.
static void conv3x3s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        for (int q = 0; q < inch; q++)
        {
            float* outptr = out;
            float* outptr2 = outptr + outw;

            const float* img0 = bottom_blob.channel(q);

            // 3x3 kernel for (output p, input q), rows k0/k1/k2.
            const float* kernel0 = kernel + p * inch * 9 + q * 9;

            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w * 2;
            const float* r3 = img0 + w * 3;

            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            int i = 0;
            // Two output rows per iteration: row i uses r0..r2, row i+1 uses r1..r3.
            for (; i + 1 < outh; i += 2)
            {
                int remain = outw;

                for (; remain > 0; remain--)
                {
                    float sum = 0;
                    float sum2 = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    sum2 += r1[0] * k0[0];
                    sum2 += r1[1] * k0[1];
                    sum2 += r1[2] * k0[2];
                    sum2 += r2[0] * k1[0];
                    sum2 += r2[1] * k1[1];
                    sum2 += r2[2] * k1[2];
                    sum2 += r3[0] * k2[0];
                    sum2 += r3[1] * k2[1];
                    sum2 += r3[2] * k2[2];

                    *outptr += sum;
                    *outptr2 += sum2;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    outptr++;
                    outptr2++;
                }

                // Skip the 2-pixel border overlap plus one extra input row
                // (two output rows consumed).
                r0 += 2 + w;
                r1 += 2 + w;
                r2 += 2 + w;
                r3 += 2 + w;

                outptr += outw;
                outptr2 += outw;
            }

            // Remaining single output row.
            for (; i < outh; i++)
            {
                int remain = outw;

                for (; remain > 0; remain--)
                {
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    *outptr += sum;

                    r0++;
                    r1++;
                    r2++;
                    outptr++;
                }

                r0 += 2;
                r1 += 2;
                r2 += 2;
            }
        }
    }
}

// Winograd F(2,3) kernel transform: U = G * g * G^T.
// Each 3x3 kernel becomes a 4x4 tile stored per (outch, inch) pair.
static void conv3x3s1_winograd23_transform_kernel_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch, const Option& opt)
{
    kernel_tm.create(4 * 4, inch, outch);

    // G
    const float ktm[4][3] = {
        {1.0f, 0.0f, 0.0f},
        {1.0f / 2, 1.0f / 2, 1.0f / 2},
        {1.0f / 2, -1.0f / 2, 1.0f / 2},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h
            float tmp[4][3];
            for (int i = 0; i < 4; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U
            for (int j = 0; j < 4; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 4; i++)
                {
                    kernel_tm0[j * 4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }
}

// Winograd F(2,3) 3x3 stride-1 convolution: input transform (B^T d B),
// element-wise dot with the pre-transformed kernels, output transform
// (A^T m A), then border cut back to the requested output size.
// SSE intrinsics (and _mm256_comp_fmadd_ps) are used when __AVX__ is set,
// with a scalar fallback otherwise.
static void conv3x3s1_winograd23_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 2n+2, winograd F(2,3)
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 1) / 2 * 2;
    outh = (outh + 1) / 2 * 2;
    w = outw + 2;
    h = outh + 2;
    Option opt_b = opt;
    opt_b.blob_allocator = opt.workspace_allocator;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);

    const float* bias = _bias;

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;

        int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 4;

        const int tiles = nColBlocks * nRowBlocks;

        bottom_blob_tm.create(4 * 4, tiles, inch, 4u, opt.workspace_allocator);

        // BT
        // const float itm[4][4] = {
        //     {1.0f, 0.0f, -1.0f, 0.0f},
        //     {0.0f, 1.0f, 1.00f, 0.0f},
        //     {0.0f, -1.0f, 1.00f, 0.0f},
        //     {0.0f, -1.0f, 0.00f, 1.0f}
        // };

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const float* img = bottom_blob_bordered.channel(q);
            float* out_tm0 = bottom_blob_tm.channel(q);

            for (int j = 0; j < nColBlocks; j++)
            {
                // Four consecutive input rows of the current 4x4 tile row.
                const float* r0 = img + w * j * 2;
                const float* r1 = r0 + w;
                const float* r2 = r1 + w;
                const float* r3 = r2 + w;

                for (int i = 0; i < nRowBlocks; i++)
                {
#if __AVX__
                    __m128 _d0, _d1, _d2, _d3;
                    __m128 _w0, _w1, _w2, _w3;

                    // load
                    _d0 = _mm_loadu_ps(r0);
                    _d1 = _mm_loadu_ps(r1);
                    _d2 = _mm_loadu_ps(r2);
                    _d3 = _mm_loadu_ps(r3);

                    // w = B_t * d
                    _w0 = _mm_sub_ps(_d0, _d2);
                    _w1 = _mm_add_ps(_d1, _d2);
                    _w2 = _mm_sub_ps(_d2, _d1);
                    _w3 = _mm_sub_ps(_d3, _d1);

                    // transpose d to d_t
                    _MM_TRANSPOSE4_PS(_w0, _w1, _w2, _w3);

                    // d = B_t * d_t
                    _d0 = _mm_sub_ps(_w0, _w2);
                    _d1 = _mm_add_ps(_w1, _w2);
                    _d2 = _mm_sub_ps(_w2, _w1);
                    _d3 = _mm_sub_ps(_w3, _w1);

                    // save to out_tm
                    _mm_storeu_ps(out_tm0, _d0);
                    _mm_storeu_ps(out_tm0 + 4, _d1);
                    _mm_storeu_ps(out_tm0 + 8, _d2);
                    _mm_storeu_ps(out_tm0 + 12, _d3);
#else
                    float d0[4], d1[4], d2[4], d3[4];
                    float w0[4], w1[4], w2[4], w3[4];
                    float t0[4], t1[4], t2[4], t3[4];

                    // load
                    for (int n = 0; n < 4; n++)
                    {
                        d0[n] = r0[n];
                        d1[n] = r1[n];
                        d2[n] = r2[n];
                        d3[n] = r3[n];
                    }

                    // w = B_t * d
                    for (int n = 0; n < 4; n++)
                    {
                        w0[n] = d0[n] - d2[n];
                        w1[n] = d1[n] + d2[n];
                        w2[n] = d2[n] - d1[n];
                        w3[n] = d3[n] - d1[n];
                    }

                    // transpose d to d_t
                    {
                        t0[0] = w0[0];
                        t1[0] = w0[1];
                        t2[0] = w0[2];
                        t3[0] = w0[3];
                        t0[1] = w1[0];
                        t1[1] = w1[1];
                        t2[1] = w1[2];
                        t3[1] = w1[3];
                        t0[2] = w2[0];
                        t1[2] = w2[1];
                        t2[2] = w2[2];
                        t3[2] = w2[3];
                        t0[3] = w3[0];
                        t1[3] = w3[1];
                        t2[3] = w3[2];
                        t3[3] = w3[3];
                    }

                    // d = B_t * d_t
                    for (int n = 0; n < 4; n++)
                    {
                        d0[n] = t0[n] - t2[n];
                        d1[n] = t1[n] + t2[n];
                        d2[n] = t2[n] - t1[n];
                        d3[n] = t3[n] - t1[n];
                    }

                    // save to out_tm
                    for (int n = 0; n < 4; n++)
                    {
                        out_tm0[n] = d0[n];
                        out_tm0[n + 4] = d1[n];
                        out_tm0[n + 8] = d2[n];
                        out_tm0[n + 12] = d3[n];
                    }
#endif
                    // Tiles overlap by 2 input pixels horizontally.
                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    r3 += 2;

                    out_tm0 += 16;
                }
            }
        }
    }
    bottom_blob_bordered = Mat();

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;

        int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 4;

        const int tiles = nColBlocks * nRowBlocks;

        top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator);

        // Process output channels 4 at a time, then a scalar remainder loop.
        int nn_outch = outch >> 2;
        int remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp = 0; pp < nn_outch; pp++)
        {
            int p = pp * 4;

            Mat out0_tm = top_blob_tm.channel(p);
            Mat out1_tm = top_blob_tm.channel(p + 1);
            Mat out2_tm = top_blob_tm.channel(p + 2);
            Mat out3_tm = top_blob_tm.channel(p + 3);

            const Mat kernel0_tm = kernel_tm.channel(p);
            const Mat kernel1_tm = kernel_tm.channel(p + 1);
            const Mat kernel2_tm = kernel_tm.channel(p + 2);
            const Mat kernel3_tm = kernel_tm.channel(p + 3);

            for (int i = 0; i < tiles; i++)
            {
                float* output0_tm = out0_tm.row(i);
                float* output1_tm = out1_tm.row(i);
                float* output2_tm = out2_tm.row(i);
                float* output3_tm = out3_tm.row(i);

#if __AVX__
                float zero_val = 0.f;
                __m256 _sum0 = _mm256_broadcast_ss(&zero_val);
                __m256 _sum0n = _mm256_broadcast_ss(&zero_val);
                __m256 _sum1 = _mm256_broadcast_ss(&zero_val);
                __m256 _sum1n = _mm256_broadcast_ss(&zero_val);
                __m256 _sum2 = _mm256_broadcast_ss(&zero_val);
                __m256 _sum2n = _mm256_broadcast_ss(&zero_val);
                __m256 _sum3 = _mm256_broadcast_ss(&zero_val);
                __m256 _sum3n = _mm256_broadcast_ss(&zero_val);

                // Accumulate 4 input channels per iteration; the transformed
                // kernel rows for q..q+3 lie at offsets 0/16/32/48 of row(q).
                int q = 0;
                for (; q + 3 < inch; q += 4)
                {
                    const float* r0 = bottom_blob_tm.channel(q).row(i);
                    const float* r1 = bottom_blob_tm.channel(q + 1).row(i);
                    const float* r2 = bottom_blob_tm.channel(q + 2).row(i);
                    const float* r3 = bottom_blob_tm.channel(q + 3).row(i);

                    const float* k0 = kernel0_tm.row(q);
                    const float* k1 = kernel1_tm.row(q);
                    const float* k2 = kernel2_tm.row(q);
                    const float* k3 = kernel3_tm.row(q);

                    __m256 _r0 = _mm256_loadu_ps(r0);
                    __m256 _r0n = _mm256_loadu_ps(r0 + 8);
                    // k0
                    __m256 _k0 = _mm256_loadu_ps(k0);
                    __m256 _k0n = _mm256_loadu_ps(k0 + 8);
                    __m256 _k1 = _mm256_loadu_ps(k1);
                    __m256 _k1n = _mm256_loadu_ps(k1 + 8);
                    __m256 _k2 = _mm256_loadu_ps(k2);
                    __m256 _k2n = _mm256_loadu_ps(k2 + 8);
                    __m256 _k3 = _mm256_loadu_ps(k3);
                    __m256 _k3n = _mm256_loadu_ps(k3 + 8);
                    _sum0 = _mm256_comp_fmadd_ps(_r0, _k0, _sum0);
                    _sum0n = _mm256_comp_fmadd_ps(_r0n, _k0n, _sum0n);
                    _sum1 = _mm256_comp_fmadd_ps(_r0, _k1, _sum1);
                    _sum1n = _mm256_comp_fmadd_ps(_r0n, _k1n, _sum1n);
                    _sum2 = _mm256_comp_fmadd_ps(_r0, _k2, _sum2);
                    _sum2n = _mm256_comp_fmadd_ps(_r0n, _k2n, _sum2n);
                    _sum3 = _mm256_comp_fmadd_ps(_r0, _k3, _sum3);
                    _sum3n = _mm256_comp_fmadd_ps(_r0n, _k3n, _sum3n);

                    // k1
                    _r0 = _mm256_loadu_ps(r1);
                    _r0n = _mm256_loadu_ps(r1 + 8);
                    _k0 = _mm256_loadu_ps(k0 + 16);
                    _k0n = _mm256_loadu_ps(k0 + 24);
                    _k1 = _mm256_loadu_ps(k1 + 16);
                    _k1n = _mm256_loadu_ps(k1 + 24);
                    _k2 = _mm256_loadu_ps(k2 + 16);
                    _k2n = _mm256_loadu_ps(k2 + 24);
                    _k3 = _mm256_loadu_ps(k3 + 16);
                    _k3n = _mm256_loadu_ps(k3 + 24);
                    _sum0 = _mm256_comp_fmadd_ps(_r0, _k0, _sum0);
                    _sum0n = _mm256_comp_fmadd_ps(_r0n, _k0n, _sum0n);
                    _sum1 = _mm256_comp_fmadd_ps(_r0, _k1, _sum1);
                    _sum1n = _mm256_comp_fmadd_ps(_r0n, _k1n, _sum1n);
                    _sum2 = _mm256_comp_fmadd_ps(_r0, _k2, _sum2);
                    _sum2n = _mm256_comp_fmadd_ps(_r0n, _k2n, _sum2n);
                    _sum3 = _mm256_comp_fmadd_ps(_r0, _k3, _sum3);
                    _sum3n = _mm256_comp_fmadd_ps(_r0n, _k3n, _sum3n);

                    // k2
                    _r0 = _mm256_loadu_ps(r2);
                    _r0n = _mm256_loadu_ps(r2 + 8);
                    _k0 = _mm256_loadu_ps(k0 + 32);
                    _k0n = _mm256_loadu_ps(k0 + 40);
                    _k1 = _mm256_loadu_ps(k1 + 32);
                    _k1n = _mm256_loadu_ps(k1 + 40);
                    _k2 = _mm256_loadu_ps(k2 + 32);
                    _k2n = _mm256_loadu_ps(k2 + 40);
                    _k3 = _mm256_loadu_ps(k3 + 32);
                    _k3n = _mm256_loadu_ps(k3 + 40);
                    _sum0 = _mm256_comp_fmadd_ps(_r0, _k0, _sum0);
                    _sum0n = _mm256_comp_fmadd_ps(_r0n, _k0n, _sum0n);
                    _sum1 = _mm256_comp_fmadd_ps(_r0, _k1, _sum1);
                    _sum1n = _mm256_comp_fmadd_ps(_r0n, _k1n, _sum1n);
                    _sum2 = _mm256_comp_fmadd_ps(_r0, _k2, _sum2);
                    _sum2n = _mm256_comp_fmadd_ps(_r0n, _k2n, _sum2n);
                    _sum3 = _mm256_comp_fmadd_ps(_r0, _k3, _sum3);
                    _sum3n = _mm256_comp_fmadd_ps(_r0n, _k3n, _sum3n);

                    // k3
                    _r0 = _mm256_loadu_ps(r3);
                    _r0n = _mm256_loadu_ps(r3 + 8);
                    _k0 = _mm256_loadu_ps(k0 + 48);
                    _k0n = _mm256_loadu_ps(k0 + 56);
                    _k1 = _mm256_loadu_ps(k1 + 48);
                    _k1n = _mm256_loadu_ps(k1 + 56);
                    _k2 = _mm256_loadu_ps(k2 + 48);
                    _k2n = _mm256_loadu_ps(k2 + 56);
                    _k3 = _mm256_loadu_ps(k3 + 48);
                    _k3n = _mm256_loadu_ps(k3 + 56);
                    _sum0 = _mm256_comp_fmadd_ps(_r0, _k0, _sum0);
                    _sum0n = _mm256_comp_fmadd_ps(_r0n, _k0n, _sum0n);
                    _sum1 = _mm256_comp_fmadd_ps(_r0, _k1, _sum1);
                    _sum1n = _mm256_comp_fmadd_ps(_r0n, _k1n, _sum1n);
                    _sum2 = _mm256_comp_fmadd_ps(_r0, _k2, _sum2);
                    _sum2n = _mm256_comp_fmadd_ps(_r0n, _k2n, _sum2n);
                    _sum3 = _mm256_comp_fmadd_ps(_r0, _k3, _sum3);
                    _sum3n = _mm256_comp_fmadd_ps(_r0n, _k3n, _sum3n);
                }

                // Remaining input channels one at a time.
                for (; q < inch; q++)
                {
                    const float* r0 = bottom_blob_tm.channel(q).row(i);

                    const float* k0 = kernel0_tm.row(q);
                    const float* k1 = kernel1_tm.row(q);
                    const float* k2 = kernel2_tm.row(q);
                    const float* k3 = kernel3_tm.row(q);

                    __m256 _r0 = _mm256_loadu_ps(r0);
                    __m256 _r0n = _mm256_loadu_ps(r0 + 8);
                    __m256 _k0 = _mm256_loadu_ps(k0);
                    __m256 _k0n = _mm256_loadu_ps(k0 + 8);
                    __m256 _k1 = _mm256_loadu_ps(k1);
                    __m256 _k1n = _mm256_loadu_ps(k1 + 8);
                    __m256 _k2 = _mm256_loadu_ps(k2);
                    __m256 _k2n = _mm256_loadu_ps(k2 + 8);
                    __m256 _k3 = _mm256_loadu_ps(k3);
                    __m256 _k3n = _mm256_loadu_ps(k3 + 8);
                    _sum0 = _mm256_comp_fmadd_ps(_r0, _k0, _sum0);
                    _sum0n = _mm256_comp_fmadd_ps(_r0n, _k0n, _sum0n);
                    _sum1 = _mm256_comp_fmadd_ps(_r0, _k1, _sum1);
                    _sum1n = _mm256_comp_fmadd_ps(_r0n, _k1n, _sum1n);
                    _sum2 = _mm256_comp_fmadd_ps(_r0, _k2, _sum2);
                    _sum2n = _mm256_comp_fmadd_ps(_r0n, _k2n, _sum2n);
                    _sum3 = _mm256_comp_fmadd_ps(_r0, _k3, _sum3);
                    _sum3n = _mm256_comp_fmadd_ps(_r0n, _k3n, _sum3n);
                }

                _mm256_storeu_ps(output0_tm, _sum0);
                _mm256_storeu_ps(output0_tm + 8, _sum0n);
                _mm256_storeu_ps(output1_tm, _sum1);
                _mm256_storeu_ps(output1_tm + 8, _sum1n);
                _mm256_storeu_ps(output2_tm, _sum2);
                _mm256_storeu_ps(output2_tm + 8, _sum2n);
                _mm256_storeu_ps(output3_tm, _sum3);
                _mm256_storeu_ps(output3_tm + 8, _sum3n);
#else
                float sum0[16] = {0.0f};
                float sum1[16] = {0.0f};
                float sum2[16] = {0.0f};
                float sum3[16] = {0.0f};

                int q = 0;
                for (; q + 3 < inch; q += 4)
                {
                    const float* r0 = bottom_blob_tm.channel(q).row(i);
                    const float* r1 = bottom_blob_tm.channel(q + 1).row(i);
                    const float* r2 = bottom_blob_tm.channel(q + 2).row(i);
                    const float* r3 = bottom_blob_tm.channel(q + 3).row(i);

                    const float* k0 = kernel0_tm.row(q);
                    const float* k1 = kernel1_tm.row(q);
                    const float* k2 = kernel2_tm.row(q);
                    const float* k3 = kernel3_tm.row(q);

                    // Walk the 4 stacked kernel tiles via +16 bumps, then
                    // rewind (-16*3) so the next n starts at the first tile.
                    for (int n = 0; n < 16; n++)
                    {
                        sum0[n] += r0[n] * k0[n];
                        k0 += 16;
                        sum0[n] += r1[n] * k0[n];
                        k0 += 16;
                        sum0[n] += r2[n] * k0[n];
                        k0 += 16;
                        sum0[n] += r3[n] * k0[n];
                        k0 -= 16 * 3;

                        sum1[n] += r0[n] * k1[n];
                        k1 += 16;
                        sum1[n] += r1[n] * k1[n];
                        k1 += 16;
                        sum1[n] += r2[n] * k1[n];
                        k1 += 16;
                        sum1[n] += r3[n] * k1[n];
                        k1 -= 16 * 3;

                        sum2[n] += r0[n] * k2[n];
                        k2 += 16;
                        sum2[n] += r1[n] * k2[n];
                        k2 += 16;
                        sum2[n] += r2[n] * k2[n];
                        k2 += 16;
                        sum2[n] += r3[n] * k2[n];
                        k2 -= 16 * 3;

                        sum3[n] += r0[n] * k3[n];
                        k3 += 16;
                        sum3[n] += r1[n] * k3[n];
                        k3 += 16;
                        sum3[n] += r2[n] * k3[n];
                        k3 += 16;
                        sum3[n] += r3[n] * k3[n];
                        k3 -= 16 * 3;
                    }
                }

                for (; q < inch; q++)
                {
                    const float* r0 = bottom_blob_tm.channel(q).row(i);

                    const float* k0 = kernel0_tm.row(q);
                    const float* k1 = kernel1_tm.row(q);
                    const float* k2 = kernel2_tm.row(q);
                    const float* k3 = kernel3_tm.row(q);

                    for (int n = 0; n < 16; n++)
                    {
                        sum0[n] += r0[n] * k0[n];
                        sum1[n] += r0[n] * k1[n];
                        sum2[n] += r0[n] * k2[n];
                        sum3[n] += r0[n] * k3[n];
                    }
                }

                for (int n = 0; n < 16; n++)
                {
                    output0_tm[n] = sum0[n];
                    output1_tm[n] = sum1[n];
                    output2_tm[n] = sum2[n];
                    output3_tm[n] = sum3[n];
                }
#endif
            }
        }

        // Remaining output channels, one at a time (scalar path only).
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = remain_outch_start; p < outch; p++)
        {
            Mat out0_tm = top_blob_tm.channel(p);

            const Mat kernel0_tm = kernel_tm.channel(p);

            for (int i = 0; i < tiles; i++)
            {
                float* output0_tm = out0_tm.row(i);

                float sum0[16] = {0.0f};

                int q = 0;
                for (; q + 3 < inch; q += 4)
                {
                    const float* r0 = bottom_blob_tm.channel(q).row(i);
                    const float* r1 = bottom_blob_tm.channel(q + 1).row(i);
                    const float* r2 = bottom_blob_tm.channel(q + 2).row(i);
                    const float* r3 = bottom_blob_tm.channel(q + 3).row(i);

                    const float* k0 = kernel0_tm.row(q);
                    const float* k1 = kernel0_tm.row(q + 1);
                    const float* k2 = kernel0_tm.row(q + 2);
                    const float* k3 = kernel0_tm.row(q + 3);

                    for (int n = 0; n < 16; n++)
                    {
                        sum0[n] += r0[n] * k0[n];
                        sum0[n] += r1[n] * k1[n];
                        sum0[n] += r2[n] * k2[n];
                        sum0[n] += r3[n] * k3[n];
                    }
                }

                for (; q < inch; q++)
                {
                    const float* r0 = bottom_blob_tm.channel(q).row(i);
                    const float* k0 = kernel0_tm.row(q);

                    for (int n = 0; n < 16; n++)
                    {
                        sum0[n] += r0[n] * k0[n];
                    }
                }

                for (int n = 0; n < 16; n++)
                {
                    output0_tm[n] = sum0[n];
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
    }
    {
        // AT
        // const float itm[2][4] = {
        //     {1.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 1.0f}
        // };

        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;

        int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 4;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            Mat out_tm = top_blob_tm.channel(p);
            Mat out = top_blob_bordered.channel(p);

            const float bias0 = bias ? bias[p] : 0.f;

            for (int j = 0; j < nColBlocks; j++)
            {
                float* outRow0 = out.row(j * 2);
                float* outRow1 = out.row(j * 2 + 1);

                for (int i = 0; i < nRowBlocks; i++)
                {
                    float* out_tile = out_tm.row(j * nRowBlocks + i);

                    float s0[4], s1[4], s2[4], s3[4];
                    float w0[4], w1[4];
                    float d0[2], d1[2], d2[2], d3[2];
                    float o0[2], o1[2];

                    // load
                    for (int n = 0; n < 4; n++)
                    {
                        s0[n] = out_tile[n];
                        s1[n] = out_tile[n + 4];
                        s2[n] = out_tile[n + 8];
                        s3[n] = out_tile[n + 12];
                    }
                    // w = A_T * W
                    for (int n = 0; n < 4; n++)
                    {
                        w0[n] = s0[n] + s1[n] + s2[n];
                        w1[n] = s1[n] - s2[n] + s3[n];
                    }
                    // transpose w to w_t
                    {
                        d0[0] = w0[0];
                        d0[1] = w1[0];
                        d1[0] = w0[1];
                        d1[1] = w1[1];
                        d2[0] = w0[2];
                        d2[1] = w1[2];
                        d3[0] = w0[3];
                        d3[1] = w1[3];
                    }
                    // Y = A_T * w_t
                    // Bias is added here, once per output pixel.
                    for (int n = 0; n < 2; n++)
                    {
                        o0[n] = d0[n] + d1[n] + d2[n] + bias0;
                        o1[n] = d1[n] - d2[n] + d3[n] + bias0;
                    }
                    // save to top blob tm
                    outRow0[0] = o0[0];
                    outRow0[1] = o0[1];
                    outRow1[0] = o1[0];
                    outRow1[1] = o1[1];

                    outRow0 += 2;
                    outRow1 += 2;
                }
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}

// Winograd F(4,3) kernel transform: U = G * g * G^T (6x6 tiles), followed by
// a repack into 9 per-stage matrices (kernel_tm2), each interleaving output
// channels in groups of 8, then 4, then singly, with 4 values per channel —
// the layout consumed by conv3x3s1_winograd43_sse.
static void conv3x3s1_winograd43_transform_kernel_sse(const Mat& kernel, std::vector<Mat>& kernel_tm2, int inch, int outch, const Option& opt)
{
    Mat kernel_tm(6 * 6, inch, outch);

    // G
    const float ktm[6][3] = {
        {1.0f / 4, 0.0f, 0.0f},
        {-1.0f / 6, -1.0f / 6, -1.0f / 6},
        {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6},
        {1.0f / 24, -1.0f / 12, 1.0f / 6},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h
            float tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U
            for (int j = 0; j < 6; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // Repack: one matrix per 4-element stage r (9 stages cover the 36 values).
    for (int r = 0; r < 9; r++)
    {
        Mat kernel_tm_test(4 * 8, inch, outch / 8 + (outch % 8) / 4 + outch % 4);

        int p = 0;
        // Groups of 8 output channels.
        for (; p + 7 < outch; p += 8)
        {
            const float* kernel0 = (const float*)kernel_tm.channel(p);
            const float* kernel1 = (const float*)kernel_tm.channel(p + 1);
            const float* kernel2 = (const float*)kernel_tm.channel(p + 2);
            const float* kernel3 = (const float*)kernel_tm.channel(p + 3);
            const float* kernel4 = (const float*)kernel_tm.channel(p + 4);
            const float* kernel5 = (const float*)kernel_tm.channel(p + 5);
            const float* kernel6 = (const float*)kernel_tm.channel(p + 6);
            const float* kernel7 = (const float*)kernel_tm.channel(p + 7);

            float* ktmp = kernel_tm_test.channel(p / 8);

            for (int q = 0; q < inch; q++)
            {
                ktmp[0] = kernel0[r * 4 + 0];
                ktmp[1] = kernel0[r * 4 + 1];
                ktmp[2] = kernel0[r * 4 + 2];
                ktmp[3] = kernel0[r * 4 + 3];

                ktmp[4] = kernel1[r * 4 + 0];
                ktmp[5] = kernel1[r * 4 + 1];
                ktmp[6] = kernel1[r * 4 + 2];
                ktmp[7] = kernel1[r * 4 + 3];

                ktmp[8] = kernel2[r * 4 + 0];
                ktmp[9] = kernel2[r * 4 + 1];
                ktmp[10] = kernel2[r * 4 + 2];
                ktmp[11] = kernel2[r * 4 + 3];

                ktmp[12] = kernel3[r * 4 + 0];
                ktmp[13] = kernel3[r * 4 + 1];
                ktmp[14] = kernel3[r * 4 + 2];
                ktmp[15] = kernel3[r * 4 + 3];

                ktmp[16] = kernel4[r * 4 + 0];
                ktmp[17] = kernel4[r * 4 + 1];
                ktmp[18] = kernel4[r * 4 + 2];
                ktmp[19] = kernel4[r * 4 + 3];

                ktmp[20] = kernel5[r * 4 + 0];
                ktmp[21] = kernel5[r * 4 + 1];
                ktmp[22] = kernel5[r * 4 + 2];
                ktmp[23] = kernel5[r * 4 + 3];

                ktmp[24] = kernel6[r * 4 + 0];
                ktmp[25] = kernel6[r * 4 + 1];
                ktmp[26] = kernel6[r * 4 + 2];
                ktmp[27] = kernel6[r * 4 + 3];

                ktmp[28] = kernel7[r * 4 + 0];
                ktmp[29] = kernel7[r * 4 + 1];
                ktmp[30] = kernel7[r * 4 + 2];
                ktmp[31] = kernel7[r * 4 + 3];

                ktmp += 32;
                // Advance to the next input channel's 6x6 tile (36 floats).
                kernel0 += 36;
                kernel1 += 36;
                kernel2 += 36;
                kernel3 += 36;
                kernel4 += 36;
                kernel5 += 36;
                kernel6 += 36;
                kernel7 += 36;
            }
        }

        // Groups of 4 output channels.
        for (; p + 3 < outch; p += 4)
        {
            const float* kernel0 = (const float*)kernel_tm.channel(p);
            const float* kernel1 = (const float*)kernel_tm.channel(p + 1);
            const float* kernel2 = (const float*)kernel_tm.channel(p + 2);
            const float* kernel3 = (const float*)kernel_tm.channel(p + 3);

            float* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4);

            for (int q = 0; q < inch; q++)
            {
                ktmp[0] = kernel0[r * 4 + 0];
                ktmp[1] = kernel0[r * 4 + 1];
                ktmp[2] = kernel0[r * 4 + 2];
                ktmp[3] = kernel0[r * 4 + 3];

                ktmp[4] = kernel1[r * 4 + 0];
                ktmp[5] = kernel1[r * 4 + 1];
                ktmp[6] = kernel1[r * 4 + 2];
                ktmp[7] = kernel1[r * 4 + 3];

                ktmp[8] = kernel2[r * 4 + 0];
                ktmp[9] = kernel2[r * 4 + 1];
                ktmp[10] = kernel2[r * 4 + 2];
                ktmp[11] = kernel2[r * 4 + 3];

                ktmp[12] = kernel3[r * 4 + 0];
                ktmp[13] = kernel3[r * 4 + 1];
                ktmp[14] = kernel3[r * 4 + 2];
                ktmp[15] = kernel3[r * 4 + 3];

                ktmp += 16;
                kernel0 += 36;
                kernel1 += 36;
                kernel2 += 36;
                kernel3 += 36;
            }
        }

        // Remaining single output channels.
        for (; p < outch; p++)
        {
            const float* kernel0 = (const float*)kernel_tm.channel(p);

            float* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4 + p % 4);

            for (int q = 0; q < inch; q++)
            {
                ktmp[0] = kernel0[r * 4 + 0];
                ktmp[1] = kernel0[r * 4 + 1];
                ktmp[2] = kernel0[r * 4 + 2];
                ktmp[3] = kernel0[r * 4 + 3];

                ktmp += 4;
                kernel0 += 36;
            }
        }
        kernel_tm2.push_back(kernel_tm_test);
    }
}

// Winograd F(4,3) 3x3 stride-1 convolution.
// NOTE(review): definition continues past this chunk — only the head is
// visible here; it is reproduced unchanged.
static void conv3x3s1_winograd43_sse(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat>& kernel_tm_test, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    size_t elemsize = bottom_blob.elemsize;
    const float* bias = _bias;

    // pad to 4n+2, winograd F(4,3)
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;
    w = outw +
2; h = outh + 2; Option opt_b = opt; opt_b.blob_allocator = opt.workspace_allocator; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b); // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; const int tiles = nColBlocks * nRowBlocks; bottom_blob_tm.create(4, inch, tiles * 9, elemsize, opt.workspace_allocator); // BT // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 // 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 // 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 #if __AVX__ __m256 _1_n = _mm256_set1_ps(-1); __m256 _2_p = _mm256_set1_ps(2); __m256 _2_n = _mm256_set1_ps(-2); __m256 _4_p = _mm256_set1_ps(4); __m256 _4_n = _mm256_set1_ps(-4); __m256 _5_n = _mm256_set1_ps(-5); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const float* img = bottom_blob_bordered.channel(q); for (int j = 0; j < nColBlocks; j++) { const float* r0 = img + w * j * 4; const float* r1 = r0 + w; const float* r2 = r1 + w; const float* r3 = r2 + w; const float* r4 = r3 + w; const float* r5 = r4 + w; for (int i = 0; i < nRowBlocks; i++) { float* out_tm0 = bottom_blob_tm.channel(tiles * 0 + j * nRowBlocks + i).row(q); float* out_tm1 = bottom_blob_tm.channel(tiles * 1 + j * nRowBlocks + i).row(q); float* out_tm2 = 
bottom_blob_tm.channel(tiles * 2 + j * nRowBlocks + i).row(q); float* out_tm3 = bottom_blob_tm.channel(tiles * 3 + j * nRowBlocks + i).row(q); float* out_tm4 = bottom_blob_tm.channel(tiles * 4 + j * nRowBlocks + i).row(q); float* out_tm5 = bottom_blob_tm.channel(tiles * 5 + j * nRowBlocks + i).row(q); float* out_tm6 = bottom_blob_tm.channel(tiles * 6 + j * nRowBlocks + i).row(q); float* out_tm7 = bottom_blob_tm.channel(tiles * 7 + j * nRowBlocks + i).row(q); float* out_tm8 = bottom_blob_tm.channel(tiles * 8 + j * nRowBlocks + i).row(q); #if __AVX__ __m256 _d0, _d1, _d2, _d3, _d4, _d5; __m256 _w0, _w1, _w2, _w3, _w4, _w5; __m256 _t0, _t1, _t2, _t3, _t4, _t5; __m256 _n0, _n1, _n2, _n3, _n4, _n5; // load _d0 = _mm256_loadu_ps(r0); _d1 = _mm256_loadu_ps(r1); _d2 = _mm256_loadu_ps(r2); _d3 = _mm256_loadu_ps(r3); _d4 = _mm256_loadu_ps(r4); _d5 = _mm256_loadu_ps(r5); // w = B_t * d _w0 = _mm256_mul_ps(_d0, _4_p); _w0 = _mm256_comp_fmadd_ps(_d2, _5_n, _w0); _w0 = _mm256_add_ps(_w0, _d4); _w1 = _mm256_mul_ps(_d1, _4_n); _w1 = _mm256_comp_fmadd_ps(_d2, _4_n, _w1); _w1 = _mm256_add_ps(_w1, _d3); _w1 = _mm256_add_ps(_w1, _d4); _w2 = _mm256_mul_ps(_d1, _4_p); _w2 = _mm256_comp_fmadd_ps(_d2, _4_n, _w2); _w2 = _mm256_comp_fmadd_ps(_d3, _1_n, _w2); _w2 = _mm256_add_ps(_w2, _d4); _w3 = _mm256_mul_ps(_d1, _2_n); _w3 = _mm256_comp_fmadd_ps(_d2, _1_n, _w3); _w3 = _mm256_comp_fmadd_ps(_d3, _2_p, _w3); _w3 = _mm256_add_ps(_w3, _d4); _w4 = _mm256_mul_ps(_d1, _2_p); _w4 = _mm256_comp_fmadd_ps(_d2, _1_n, _w4); _w4 = _mm256_comp_fmadd_ps(_d3, _2_n, _w4); _w4 = _mm256_add_ps(_w4, _d4); _w5 = _mm256_mul_ps(_d1, _4_p); _w5 = _mm256_comp_fmadd_ps(_d3, _5_n, _w5); _w5 = _mm256_add_ps(_w5, _d5); // transpose d to d_t #if (defined _WIN32 && !(defined __MINGW32__) && !__clang__) { _t0.m256_f32[0] = _w0.m256_f32[0]; _t1.m256_f32[0] = _w0.m256_f32[1]; _t2.m256_f32[0] = _w0.m256_f32[2]; _t3.m256_f32[0] = _w0.m256_f32[3]; _t4.m256_f32[0] = _w0.m256_f32[4]; _t5.m256_f32[0] = _w0.m256_f32[5]; 
_t0.m256_f32[1] = _w1.m256_f32[0]; _t1.m256_f32[1] = _w1.m256_f32[1]; _t2.m256_f32[1] = _w1.m256_f32[2]; _t3.m256_f32[1] = _w1.m256_f32[3]; _t4.m256_f32[1] = _w1.m256_f32[4]; _t5.m256_f32[1] = _w1.m256_f32[5]; _t0.m256_f32[2] = _w2.m256_f32[0]; _t1.m256_f32[2] = _w2.m256_f32[1]; _t2.m256_f32[2] = _w2.m256_f32[2]; _t3.m256_f32[2] = _w2.m256_f32[3]; _t4.m256_f32[2] = _w2.m256_f32[4]; _t5.m256_f32[2] = _w2.m256_f32[5]; _t0.m256_f32[3] = _w3.m256_f32[0]; _t1.m256_f32[3] = _w3.m256_f32[1]; _t2.m256_f32[3] = _w3.m256_f32[2]; _t3.m256_f32[3] = _w3.m256_f32[3]; _t4.m256_f32[3] = _w3.m256_f32[4]; _t5.m256_f32[3] = _w3.m256_f32[5]; _t0.m256_f32[4] = _w4.m256_f32[0]; _t1.m256_f32[4] = _w4.m256_f32[1]; _t2.m256_f32[4] = _w4.m256_f32[2]; _t3.m256_f32[4] = _w4.m256_f32[3]; _t4.m256_f32[4] = _w4.m256_f32[4]; _t5.m256_f32[4] = _w4.m256_f32[5]; _t0.m256_f32[5] = _w5.m256_f32[0]; _t1.m256_f32[5] = _w5.m256_f32[1]; _t2.m256_f32[5] = _w5.m256_f32[2]; _t3.m256_f32[5] = _w5.m256_f32[3]; _t4.m256_f32[5] = _w5.m256_f32[4]; _t5.m256_f32[5] = _w5.m256_f32[5]; } #else { _t0[0] = _w0[0]; _t1[0] = _w0[1]; _t2[0] = _w0[2]; _t3[0] = _w0[3]; _t4[0] = _w0[4]; _t5[0] = _w0[5]; _t0[1] = _w1[0]; _t1[1] = _w1[1]; _t2[1] = _w1[2]; _t3[1] = _w1[3]; _t4[1] = _w1[4]; _t5[1] = _w1[5]; _t0[2] = _w2[0]; _t1[2] = _w2[1]; _t2[2] = _w2[2]; _t3[2] = _w2[3]; _t4[2] = _w2[4]; _t5[2] = _w2[5]; _t0[3] = _w3[0]; _t1[3] = _w3[1]; _t2[3] = _w3[2]; _t3[3] = _w3[3]; _t4[3] = _w3[4]; _t5[3] = _w3[5]; _t0[4] = _w4[0]; _t1[4] = _w4[1]; _t2[4] = _w4[2]; _t3[4] = _w4[3]; _t4[4] = _w4[4]; _t5[4] = _w4[5]; _t0[5] = _w5[0]; _t1[5] = _w5[1]; _t2[5] = _w5[2]; _t3[5] = _w5[3]; _t4[5] = _w5[4]; _t5[5] = _w5[5]; } #endif // d = B_t * d_t _n0 = _mm256_mul_ps(_t0, _4_p); _n0 = _mm256_comp_fmadd_ps(_t2, _5_n, _n0); _n0 = _mm256_add_ps(_n0, _t4); _n1 = _mm256_mul_ps(_t1, _4_n); _n1 = _mm256_comp_fmadd_ps(_t2, _4_n, _n1); _n1 = _mm256_add_ps(_n1, _t3); _n1 = _mm256_add_ps(_n1, _t4); _n2 = _mm256_mul_ps(_t1, _4_p); _n2 = 
_mm256_comp_fmadd_ps(_t2, _4_n, _n2); _n2 = _mm256_comp_fmadd_ps(_t3, _1_n, _n2); _n2 = _mm256_add_ps(_n2, _t4); _n3 = _mm256_mul_ps(_t1, _2_n); _n3 = _mm256_comp_fmadd_ps(_t2, _1_n, _n3); _n3 = _mm256_comp_fmadd_ps(_t3, _2_p, _n3); _n3 = _mm256_add_ps(_n3, _t4); _n4 = _mm256_mul_ps(_t1, _2_p); _n4 = _mm256_comp_fmadd_ps(_t2, _1_n, _n4); _n4 = _mm256_comp_fmadd_ps(_t3, _2_n, _n4); _n4 = _mm256_add_ps(_n4, _t4); _n5 = _mm256_mul_ps(_t1, _4_p); _n5 = _mm256_comp_fmadd_ps(_t3, _5_n, _n5); _n5 = _mm256_add_ps(_n5, _t5); // save to out_tm float output_n0[8] = {0.f}; _mm256_storeu_ps(output_n0, _n0); float output_n1[8] = {0.f}; _mm256_storeu_ps(output_n1, _n1); float output_n2[8] = {0.f}; _mm256_storeu_ps(output_n2, _n2); float output_n3[8] = {0.f}; _mm256_storeu_ps(output_n3, _n3); float output_n4[8] = {0.f}; _mm256_storeu_ps(output_n4, _n4); float output_n5[8] = {0.f}; _mm256_storeu_ps(output_n5, _n5); out_tm0[0] = output_n0[0]; out_tm0[1] = output_n0[1]; out_tm0[2] = output_n0[2]; out_tm0[3] = output_n0[3]; out_tm1[0] = output_n0[4]; out_tm1[1] = output_n0[5]; out_tm1[2] = output_n1[0]; out_tm1[3] = output_n1[1]; out_tm2[0] = output_n1[2]; out_tm2[1] = output_n1[3]; out_tm2[2] = output_n1[4]; out_tm2[3] = output_n1[5]; out_tm3[0] = output_n2[0]; out_tm3[1] = output_n2[1]; out_tm3[2] = output_n2[2]; out_tm3[3] = output_n2[3]; out_tm4[0] = output_n2[4]; out_tm4[1] = output_n2[5]; out_tm4[2] = output_n3[0]; out_tm4[3] = output_n3[1]; out_tm5[0] = output_n3[2]; out_tm5[1] = output_n3[3]; out_tm5[2] = output_n3[4]; out_tm5[3] = output_n3[5]; out_tm6[0] = output_n4[0]; out_tm6[1] = output_n4[1]; out_tm6[2] = output_n4[2]; out_tm6[3] = output_n4[3]; out_tm7[0] = output_n4[4]; out_tm7[1] = output_n4[5]; out_tm7[2] = output_n5[0]; out_tm7[3] = output_n5[1]; out_tm8[0] = output_n5[2]; out_tm8[1] = output_n5[3]; out_tm8[2] = output_n5[4]; out_tm8[3] = output_n5[5]; #else float d0[6], d1[6], d2[6], d3[6], d4[6], d5[6]; float w0[6], w1[6], w2[6], w3[6], w4[6], w5[6]; float t0[6], 
t1[6], t2[6], t3[6], t4[6], t5[6]; // load for (int n = 0; n < 6; n++) { d0[n] = r0[n]; d1[n] = r1[n]; d2[n] = r2[n]; d3[n] = r3[n]; d4[n] = r4[n]; d5[n] = r5[n]; } // w = B_t * d for (int n = 0; n < 6; n++) { w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n]; w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n]; w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n]; w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n]; w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n]; w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n]; } // transpose d to d_t { t0[0] = w0[0]; t1[0] = w0[1]; t2[0] = w0[2]; t3[0] = w0[3]; t4[0] = w0[4]; t5[0] = w0[5]; t0[1] = w1[0]; t1[1] = w1[1]; t2[1] = w1[2]; t3[1] = w1[3]; t4[1] = w1[4]; t5[1] = w1[5]; t0[2] = w2[0]; t1[2] = w2[1]; t2[2] = w2[2]; t3[2] = w2[3]; t4[2] = w2[4]; t5[2] = w2[5]; t0[3] = w3[0]; t1[3] = w3[1]; t2[3] = w3[2]; t3[3] = w3[3]; t4[3] = w3[4]; t5[3] = w3[5]; t0[4] = w4[0]; t1[4] = w4[1]; t2[4] = w4[2]; t3[4] = w4[3]; t4[4] = w4[4]; t5[4] = w4[5]; t0[5] = w5[0]; t1[5] = w5[1]; t2[5] = w5[2]; t3[5] = w5[3]; t4[5] = w5[4]; t5[5] = w5[5]; } // d = B_t * d_t for (int n = 0; n < 6; n++) { d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n]; d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n]; d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n]; d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n]; d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n]; d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n]; } // save to out_tm { out_tm0[0] = d0[0]; out_tm0[1] = d0[1]; out_tm0[2] = d0[2]; out_tm0[3] = d0[3]; out_tm1[0] = d0[4]; out_tm1[1] = d0[5]; out_tm1[2] = d1[0]; out_tm1[3] = d1[1]; out_tm2[0] = d1[2]; out_tm2[1] = d1[3]; out_tm2[2] = d1[4]; out_tm2[3] = d1[5]; out_tm3[0] = d2[0]; out_tm3[1] = d2[1]; out_tm3[2] = d2[2]; out_tm3[3] = d2[3]; out_tm4[0] = d2[4]; out_tm4[1] = d2[5]; out_tm4[2] = d3[0]; out_tm4[3] = d3[1]; out_tm5[0] = d3[2]; out_tm5[1] = d3[3]; out_tm5[2] = d3[4]; out_tm5[3] = d3[5]; out_tm6[0] = d4[0]; out_tm6[1] = d4[1]; out_tm6[2] = d4[2]; out_tm6[3] = d4[3]; out_tm7[0] = d4[4]; out_tm7[1] = d4[5]; out_tm7[2] = 
d5[0]; out_tm7[3] = d5[1]; out_tm8[0] = d5[2]; out_tm8[1] = d5[3]; out_tm8[2] = d5[4]; out_tm8[3] = d5[5]; } #endif // __AVX__ r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; r5 += 4; } } } } bottom_blob_bordered = Mat(); // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; const int tiles = nColBlocks * nRowBlocks; top_blob_tm.create(36, tiles, outch, elemsize, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 9; r++) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; float* output0_tm = top_blob_tm.channel(p); float* output1_tm = top_blob_tm.channel(p + 1); float* output2_tm = top_blob_tm.channel(p + 2); float* output3_tm = top_blob_tm.channel(p + 3); float* output4_tm = top_blob_tm.channel(p + 4); float* output5_tm = top_blob_tm.channel(p + 5); float* output6_tm = top_blob_tm.channel(p + 6); float* output7_tm = top_blob_tm.channel(p + 7); output0_tm = output0_tm + r * 4; output1_tm = output1_tm + r * 4; output2_tm = output2_tm + r * 4; output3_tm = output3_tm + r * 4; output4_tm = output4_tm + r * 4; output5_tm = output5_tm + r * 4; output6_tm = output6_tm + r * 4; output7_tm = output7_tm + r * 4; for (int i = 0; i < tiles; i++) { const float* kptr = kernel_tm_test[r].channel(p / 8); const float* r0 = bottom_blob_tm.channel(tiles * r + i); #if __AVX__ || __SSE__ #if __AVX__ float zero_val = 0.f; __m128 _sum0 = _mm_broadcast_ss(&zero_val); __m128 _sum1 = _mm_broadcast_ss(&zero_val); __m128 _sum2 = _mm_broadcast_ss(&zero_val); __m128 _sum3 = _mm_broadcast_ss(&zero_val); __m128 _sum4 = _mm_broadcast_ss(&zero_val); __m128 _sum5 = _mm_broadcast_ss(&zero_val); __m128 _sum6 = _mm_broadcast_ss(&zero_val); __m128 _sum7 = _mm_broadcast_ss(&zero_val); #else __m128 _sum0 = _mm_set1_ps(0.f); __m128 
_sum1 = _mm_set1_ps(0.f); __m128 _sum2 = _mm_set1_ps(0.f); __m128 _sum3 = _mm_set1_ps(0.f); __m128 _sum4 = _mm_set1_ps(0.f); __m128 _sum5 = _mm_set1_ps(0.f); __m128 _sum6 = _mm_set1_ps(0.f); __m128 _sum7 = _mm_set1_ps(0.f); #endif int q = 0; for (; q + 3 < inch; q = q + 4) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _r1 = _mm_loadu_ps(r0 + 4); __m128 _r2 = _mm_loadu_ps(r0 + 8); __m128 _r3 = _mm_loadu_ps(r0 + 12); __m128 _k0 = _mm_loadu_ps(kptr); __m128 _k1 = _mm_loadu_ps(kptr + 4); __m128 _k2 = _mm_loadu_ps(kptr + 8); __m128 _k3 = _mm_loadu_ps(kptr + 12); __m128 _k4 = _mm_loadu_ps(kptr + 16); __m128 _k5 = _mm_loadu_ps(kptr + 20); __m128 _k6 = _mm_loadu_ps(kptr + 24); __m128 _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_comp_fmadd_ps(_r0, _k0, _sum0); _sum1 = _mm_comp_fmadd_ps(_r0, _k1, _sum1); _sum2 = _mm_comp_fmadd_ps(_r0, _k2, _sum2); _sum3 = _mm_comp_fmadd_ps(_r0, _k3, _sum3); _sum4 = _mm_comp_fmadd_ps(_r0, _k4, _sum4); _sum5 = _mm_comp_fmadd_ps(_r0, _k5, _sum5); _sum6 = _mm_comp_fmadd_ps(_r0, _k6, _sum6); _sum7 = _mm_comp_fmadd_ps(_r0, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7)); #endif kptr += 32; _k0 = _mm_loadu_ps(kptr); _k1 = _mm_loadu_ps(kptr + 4); _k2 = _mm_loadu_ps(kptr + 8); _k3 = _mm_loadu_ps(kptr + 12); _k4 = _mm_loadu_ps(kptr + 16); _k5 = _mm_loadu_ps(kptr + 20); _k6 = _mm_loadu_ps(kptr + 24); _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_comp_fmadd_ps(_r1, _k0, _sum0); _sum1 = _mm_comp_fmadd_ps(_r1, _k1, _sum1); _sum2 = _mm_comp_fmadd_ps(_r1, _k2, _sum2); _sum3 = _mm_comp_fmadd_ps(_r1, _k3, _sum3); _sum4 = _mm_comp_fmadd_ps(_r1, _k4, _sum4); _sum5 = _mm_comp_fmadd_ps(_r1, _k5, 
_sum5); _sum6 = _mm_comp_fmadd_ps(_r1, _k6, _sum6); _sum7 = _mm_comp_fmadd_ps(_r1, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r1, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r1, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r1, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r1, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r1, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r1, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r1, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r1, _k7)); #endif kptr += 32; _k0 = _mm_loadu_ps(kptr); _k1 = _mm_loadu_ps(kptr + 4); _k2 = _mm_loadu_ps(kptr + 8); _k3 = _mm_loadu_ps(kptr + 12); _k4 = _mm_loadu_ps(kptr + 16); _k5 = _mm_loadu_ps(kptr + 20); _k6 = _mm_loadu_ps(kptr + 24); _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_comp_fmadd_ps(_r2, _k0, _sum0); _sum1 = _mm_comp_fmadd_ps(_r2, _k1, _sum1); _sum2 = _mm_comp_fmadd_ps(_r2, _k2, _sum2); _sum3 = _mm_comp_fmadd_ps(_r2, _k3, _sum3); _sum4 = _mm_comp_fmadd_ps(_r2, _k4, _sum4); _sum5 = _mm_comp_fmadd_ps(_r2, _k5, _sum5); _sum6 = _mm_comp_fmadd_ps(_r2, _k6, _sum6); _sum7 = _mm_comp_fmadd_ps(_r2, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r2, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r2, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r2, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r2, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r2, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r2, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r2, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r2, _k7)); #endif kptr += 32; _k0 = _mm_loadu_ps(kptr); _k1 = _mm_loadu_ps(kptr + 4); _k2 = _mm_loadu_ps(kptr + 8); _k3 = _mm_loadu_ps(kptr + 12); _k4 = _mm_loadu_ps(kptr + 16); _k5 = _mm_loadu_ps(kptr + 20); _k6 = _mm_loadu_ps(kptr + 24); _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_comp_fmadd_ps(_r3, _k0, _sum0); _sum1 = _mm_comp_fmadd_ps(_r3, _k1, _sum1); _sum2 = _mm_comp_fmadd_ps(_r3, _k2, _sum2); _sum3 = _mm_comp_fmadd_ps(_r3, _k3, _sum3); _sum4 = _mm_comp_fmadd_ps(_r3, _k4, 
_sum4); _sum5 = _mm_comp_fmadd_ps(_r3, _k5, _sum5); _sum6 = _mm_comp_fmadd_ps(_r3, _k6, _sum6); _sum7 = _mm_comp_fmadd_ps(_r3, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r3, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r3, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r3, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r3, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r3, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r3, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r3, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r3, _k7)); #endif kptr += 32; r0 += 16; } for (; q < inch; q++) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _k0 = _mm_loadu_ps(kptr); __m128 _k1 = _mm_loadu_ps(kptr + 4); __m128 _k2 = _mm_loadu_ps(kptr + 8); __m128 _k3 = _mm_loadu_ps(kptr + 12); __m128 _k4 = _mm_loadu_ps(kptr + 16); __m128 _k5 = _mm_loadu_ps(kptr + 20); __m128 _k6 = _mm_loadu_ps(kptr + 24); __m128 _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_comp_fmadd_ps(_r0, _k0, _sum0); _sum1 = _mm_comp_fmadd_ps(_r0, _k1, _sum1); _sum2 = _mm_comp_fmadd_ps(_r0, _k2, _sum2); _sum3 = _mm_comp_fmadd_ps(_r0, _k3, _sum3); _sum4 = _mm_comp_fmadd_ps(_r0, _k4, _sum4); _sum5 = _mm_comp_fmadd_ps(_r0, _k5, _sum5); _sum6 = _mm_comp_fmadd_ps(_r0, _k6, _sum6); _sum7 = _mm_comp_fmadd_ps(_r0, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7)); #endif kptr += 32; r0 += 4; } _mm_storeu_ps(output0_tm, _sum0); _mm_storeu_ps(output1_tm, _sum1); _mm_storeu_ps(output2_tm, _sum2); _mm_storeu_ps(output3_tm, _sum3); _mm_storeu_ps(output4_tm, _sum4); _mm_storeu_ps(output5_tm, _sum5); _mm_storeu_ps(output6_tm, _sum6); _mm_storeu_ps(output7_tm, _sum7); #else float 
sum0[4] = {0}; float sum1[4] = {0}; float sum2[4] = {0}; float sum3[4] = {0}; float sum4[4] = {0}; float sum5[4] = {0}; float sum6[4] = {0}; float sum7[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += r0[n] * kptr[n]; sum1[n] += r0[n] * kptr[n + 4]; sum2[n] += r0[n] * kptr[n + 8]; sum3[n] += r0[n] * kptr[n + 12]; sum4[n] += r0[n] * kptr[n + 16]; sum5[n] += r0[n] * kptr[n + 20]; sum6[n] += r0[n] * kptr[n + 24]; sum7[n] += r0[n] * kptr[n + 28]; } kptr += 32; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; output4_tm[n] = sum4[n]; output5_tm[n] = sum5[n]; output6_tm[n] = sum6[n]; output7_tm[n] = sum7[n]; } #endif // __AVX__ output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; output4_tm += 36; output5_tm += 36; output6_tm += 36; output7_tm += 36; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; float* output0_tm = top_blob_tm.channel(p); float* output1_tm = top_blob_tm.channel(p + 1); float* output2_tm = top_blob_tm.channel(p + 2); float* output3_tm = top_blob_tm.channel(p + 3); output0_tm = output0_tm + r * 4; output1_tm = output1_tm + r * 4; output2_tm = output2_tm + r * 4; output3_tm = output3_tm + r * 4; for (int i = 0; i < tiles; i++) { const float* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4); const float* r0 = bottom_blob_tm.channel(tiles * r + i); #if __AVX__ || __SSE__ #if __AVX__ float zero_val = 0.f; __m128 _sum0 = _mm_broadcast_ss(&zero_val); __m128 _sum1 = _mm_broadcast_ss(&zero_val); __m128 _sum2 = _mm_broadcast_ss(&zero_val); __m128 _sum3 = _mm_broadcast_ss(&zero_val); #else __m128 _sum0 = _mm_set1_ps(0.f); __m128 _sum1 = _mm_set1_ps(0.f); __m128 _sum2 = _mm_set1_ps(0.f); __m128 _sum3 = _mm_set1_ps(0.f); #endif for (int q = 0; q < inch; q++) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _k0 = _mm_loadu_ps(kptr); __m128 _k1 = 
_mm_loadu_ps(kptr + 4); __m128 _k2 = _mm_loadu_ps(kptr + 8); __m128 _k3 = _mm_loadu_ps(kptr + 12); #if __AVX__ _sum0 = _mm_comp_fmadd_ps(_r0, _k0, _sum0); _sum1 = _mm_comp_fmadd_ps(_r0, _k1, _sum1); _sum2 = _mm_comp_fmadd_ps(_r0, _k2, _sum2); _sum3 = _mm_comp_fmadd_ps(_r0, _k3, _sum3); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3)); #endif kptr += 16; r0 += 4; } _mm_storeu_ps(output0_tm, _sum0); _mm_storeu_ps(output1_tm, _sum1); _mm_storeu_ps(output2_tm, _sum2); _mm_storeu_ps(output3_tm, _sum3); #else float sum0[4] = {0}; float sum1[4] = {0}; float sum2[4] = {0}; float sum3[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += r0[n] * kptr[n]; sum1[n] += r0[n] * kptr[n + 4]; sum2[n] += r0[n] * kptr[n + 8]; sum3[n] += r0[n] * kptr[n + 12]; } kptr += 16; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; } #endif // __AVX__ output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; } } remain_outch_start += nn_outch << 2; for (int p = remain_outch_start; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); output0_tm = output0_tm + r * 4; for (int i = 0; i < tiles; i++) { const float* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4 + p % 4); const float* r0 = bottom_blob_tm.channel(tiles * r + i); #if __AVX__ || __SSE__ #if __AVX__ float zero_val = 0.f; __m128 _sum0 = _mm_broadcast_ss(&zero_val); #else __m128 _sum0 = _mm_set1_ps(0.f); #endif for (int q = 0; q < inch; q++) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _k0 = _mm_loadu_ps(kptr); #if __AVX__ _sum0 = _mm_comp_fmadd_ps(_r0, _k0, _sum0); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); #endif kptr += 16; r0 += 4; } _mm_storeu_ps(output0_tm, _sum0); #else float sum0[4] = {0}; for (int q = 0; q < inch; q++) { for 
(int n = 0; n < 4; n++) { sum0[n] += (int)r0[n] * kptr[n]; } kptr += 4; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; } #endif // __AVX__ || __SSE__ output0_tm += 36; } } // for (int p=0; p<outch; p++) // { // Mat out0_tm = top_blob_tm.channel(p); // const Mat kernel0_tm = kernel_tm.channel(p); // for (int i=0; i<tiles; i++) // { // float* output0_tm = out0_tm.row<int>(i); // int sum0[36] = {0}; // for (int q=0; q<inch; q++) // { // const float* r0 = bottom_blob_tm.channel(q).row<float>(i); // const float* k0 = kernel0_tm.row<float>(q); // for (int n=0; n<36; n++) // { // sum0[n] += (int)r0[n] * k0[n]; // } // } // for (int n=0; n<36; n++) // { // output0_tm[n] = sum0[n]; // } // } // } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, opt.workspace_allocator); } { // AT // const float itm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + r01 + r02 + r03 + r04 // 1 = r01 - r02 + 2 * (r03 - r04) // 2 = r01 + r02 + 4 * (r03 + r04) // 3 = r01 - r02 + 8 * (r03 - r04) + r05 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* out_tile = top_blob_tm.channel(p); float* outRow0 = top_blob_bordered.channel(p); float* outRow1 = outRow0 + outw; float* outRow2 = outRow0 + outw * 2; float* outRow3 = outRow0 + outw * 3; const float bias0 = bias ? 
bias[p] : 0.f;

        // Walk the 6x6 transformed tiles of this output channel; each tile
        // yields one 4x4 output patch (Winograd F(4,3) inverse transform).
        for (int j = 0; j < nColBlocks; j++)
        {
            for (int i = 0; i < nRowBlocks; i++)
            {
                // TODO AVX2
                float s0[6], s1[6], s2[6], s3[6], s4[6], s5[6];
                float w0[6], w1[6], w2[6], w3[6];
                float d0[4], d1[4], d2[4], d3[4], d4[4], d5[4];
                float o0[4], o1[4], o2[4], o3[4];

                // load the 6x6 tile (row-major, 36 floats per tile)
                for (int n = 0; n < 6; n++)
                {
                    s0[n] = out_tile[n];
                    s1[n] = out_tile[n + 6];
                    s2[n] = out_tile[n + 12];
                    s3[n] = out_tile[n + 18];
                    s4[n] = out_tile[n + 24];
                    s5[n] = out_tile[n + 30];
                }
                // w = A_T * W  (apply AT, listed in the comment above, to the rows)
                for (int n = 0; n < 6; n++)
                {
                    w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
                    w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n];
                    w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n];
                    w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + s5[n];
                }
                // transpose w to w_t
                {
                    d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = w3[0];
                    d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1];
                    d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2];
                    d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3];
                    d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4];
                    d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5];
                }
                // Y = A_T * w_t  (apply AT to the columns)
                for (int n = 0; n < 4; n++)
                {
                    o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
                    o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n];
                    o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n];
                    o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n];
                }
                // save to top blob tm; bias added exactly once per output element
                for (int n = 0; n < 4; n++)
                {
                    outRow0[n] = o0[n] + bias0;
                    outRow1[n] = o1[n] + bias0;
                    outRow2[n] = o2[n] + bias0;
                    outRow3[n] = o3[n] + bias0;
                }

                out_tile += 36;
                outRow0 += 4;
                outRow1 += 4;
                outRow2 += 4;
                outRow3 += 4;
            }
            // each tile row produced 4 output rows; skip the other 3
            outRow0 += outw * 3;
            outRow1 += outw * 3;
            outRow2 += outw * 3;
            outRow3 += outw * 3;
        }
    }
}
// END transform output

// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}

// Plain scalar 3x3 stride-2 convolution.
// For every (output channel p, input channel q) pair, accumulates the 3x3
// neighborhood of the input channel (rows r0/r1/r2, taps k0/k1/k2) into the
// output channel, which is pre-filled with its bias.  tailstep skips the row
// consumed by the stride-2 walk plus the remainder of the current row.
// Parallelized over output channels, so each thread owns its output plane.
static void conv3x3s2_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // advance past the second input row of the stride-2 window plus any
    // right-edge leftover: (w - 2*outw) remainder + w for the skipped row
    const int tailstep = w - 2 * outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        for (int q = 0; q < inch; q++)
        {
            float* outptr = out;

            const float* img = bottom_blob.channel(q);
            // kernel layout: outch x inch x 9 contiguous floats
            const float* kernel0 = kernel + p * inch * 9 + q * 9;

            const float* r0 = img;
            const float* r1 = img + w;
            const float* r2 = img + w * 2;

            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            for (int i = 0; i < outh; i++)
            {
                int remain = outw;

                for (; remain > 0; remain--)
                {
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    *outptr += sum;

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr++;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }
        }
    }
}
indirectaccesssharebase-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

// DataRaceBench micro-benchmark: two pointers (xa1, xa3) alias one malloc'd
// buffer at a fixed offset of 12 doubles.  No two indices in indexSet[] are
// within distance 12 of each other, so xa1[idx] and xa3[idx] never hit the
// same element across iterations -- the "#pragma omp parallel for" loop below
// is race-free by construction.
//
// This example is to mimic a memory access pattern extracted from an LLNL proxy app.
// Two pointers have distance of 12.
// index set has no two indices with distance of 12
// So there is no loop carried dependence.
// Liao, 12/13/2016
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define N 180

// The 180 sparse indices touched by the parallel loop (all odd, in [521, 2013]).
int indexSet[N] = {
  521, 523, 525, 527, 529, 531, 547, 549, 551, 553, 555, 557,
  573, 575, 577, 579, 581, 583, 599, 601, 603, 605, 607, 609,
  625, 627, 629, 631, 633, 635, 651, 653, 655, 657, 659, 661,
  859, 861, 863, 865, 867, 869, 885, 887, 889, 891, 893, 895,
  911, 913, 915, 917, 919, 921, 937, 939, 941, 943, 945, 947,
  963, 965, 967, 969, 971, 973, 989, 991, 993, 995, 997, 999,
  1197, 1199, 1201, 1203, 1205, 1207, 1223, 1225, 1227, 1229, 1231, 1233,
  1249, 1251, 1253, 1255, 1257, 1259, 1275, 1277, 1279, 1281, 1283, 1285,
  1301, 1303, 1305, 1307, 1309, 1311, 1327, 1329, 1331, 1333, 1335, 1337,
  1535, 1537, 1539, 1541, 1543, 1545, 1561, 1563, 1565, 1567, 1569, 1571,
  1587, 1589, 1591, 1593, 1595, 1597, 1613, 1615, 1617, 1619, 1621, 1623,
  1639, 1641, 1643, 1645, 1647, 1649, 1665, 1667, 1669, 1671, 1673, 1675,
  1873, 1875, 1877, 1879, 1881, 1883, 1899, 1901, 1903, 1905, 1907, 1909,
  1925, 1927, 1929, 1931, 1933, 1935, 1951, 1953, 1955, 1957, 1959, 1961,
  1977, 1979, 1981, 1983, 1985, 1987, 2003, 2005, 2007, 2009, 2011, 2013};

int main (int argc, char* argv[])
{
  // Sized for the highest element reached through xa3: xa3[2013] == base[2025].
  double * base = (double*) malloc(sizeof(double)* (2013+12+1));
  if (base == 0)
  {
    printf("Error, malloc() returns NULL. End execution. \n");
    return 1;
  }

  double * xa1 = base;
  double * xa3 = base + 12; // deliberately overlaps base with a 12-element shift

  int i;

  // Initialize only the element range the loops below actually touch.
  for (i =521; i<= 2025; ++i)
  {
    base[i]=0.0;
  }

#pragma omp parallel for
  for (i =0; i< N; ++i) // this level of loop has no loop carried dependence
  {
    int idx = indexSet[i];
    xa1[idx]+= 1.0;
    xa3[idx]+= 3.0;
  }

  // verify the results, no overlapping of xa1 vs. xa3, no addition happens to the same element twice
  // (an element would only reach 4.0 == 1.0 + 3.0 if xa1 and xa3 collided)
  for (i =521; i<= 2025; ++i)
  {
    // printf ("%f ", base[i]);
    assert (base[i]!=4.0);
  }

  free (base);
  return 0;
}
RCCE.h
//
// Copyright 2010 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Public interface of the RCCE message-passing library for the SCC
// (Single-chip Cloud Computer): constants, flag/communicator types and
// the communication API prototypes.
#ifndef RCCE_H
#define RCCE_H

#include <stdlib.h>
#include <stdio.h>

#define _RCCE "1.0.7 release"

// little trick to allow the application to be called "RCCE_APP" under
// OpenMP, and "main" otherwise
#ifndef _OPENMP
#define RCCE_APP main
#endif

// modify next line for BareMetal, which supports stdout, but not stderr
#define STDERR stdout

// Cache-line geometry of the on-chip message-passing buffer (MPB).
#define LOG2_LINE_SIZE 5
#define RCCE_LINE_SIZE (1<<LOG2_LINE_SIZE)
// RCCE_BUFF_SIZE_MAX is space per UE, which is half of the space per tile
#define RCCE_BUFF_SIZE_MAX (1<<13)
// Maximum number of units of execution (cores) supported.
#define RCCE_MAXNP 48

// Error codes.  RCCE_ERROR_BASE is an arbitrary sentinel offset so that
// error values cannot collide with ordinary small return values.
#define RCCE_SUCCESS 0
#define RCCE_ERROR_BASE 1234321
#define RCCE_ERROR_TARGET (RCCE_ERROR_BASE + 1)
#define RCCE_ERROR_SOURCE (RCCE_ERROR_BASE + 2)
#define RCCE_ERROR_ID (RCCE_ERROR_BASE + 3)
#define RCCE_ERROR_MESSAGE_LENGTH (RCCE_ERROR_BASE + 4)
#define RCCE_ERROR_FLAG_UNDEFINED (RCCE_ERROR_BASE + 5)
#define RCCE_ERROR_NUM_UES (RCCE_ERROR_BASE + 6)
#define RCCE_ERROR_DATA_OVERLAP (RCCE_ERROR_BASE + 7)
#define RCCE_ERROR_ALIGNMENT (RCCE_ERROR_BASE + 8)
#define RCCE_ERROR_DEBUG_FLAG (RCCE_ERROR_BASE + 9)
#define RCCE_ERROR_FLAG_NOT_IN_COMM_BUFFER (RCCE_ERROR_BASE + 10)
#define RCCE_ERROR_FLAG_STATUS_UNDEFINED (RCCE_ERROR_BASE + 11)
#define RCCE_ERROR_FLAG_NOT_ALLOCATED (RCCE_ERROR_BASE + 12)
#define RCCE_ERROR_VAL_UNDEFINED (RCCE_ERROR_BASE + 13)
#define RCCE_ERROR_INVALID_ERROR_CODE (RCCE_ERROR_BASE + 14)
#define RCCE_ERROR_RPC_NOT_ALLOCATED (RCCE_ERROR_BASE + 15)
#define RCCE_ERROR_RPC_INTERNAL (RCCE_ERROR_BASE + 16)
#define RCCE_ERROR_MULTIPLE_RPC_REQUESTS (RCCE_ERROR_BASE + 17)
#define RCCE_ERROR_FDIVIDER (RCCE_ERROR_BASE + 18)
#define RCCE_ERROR_FREQUENCY_EXCEEDED (RCCE_ERROR_BASE + 19)
#define RCCE_ERROR_NO_ACTIVE_RPC_REQUEST (RCCE_ERROR_BASE + 20)
#define RCCE_ERROR_STALE_RPC_REQUEST (RCCE_ERROR_BASE + 21)
#define RCCE_ERROR_COMM_UNDEFINED (RCCE_ERROR_BASE + 22)
#define RCCE_ERROR_ILLEGAL_OP (RCCE_ERROR_BASE + 23)
#define RCCE_ERROR_ILLEGAL_TYPE (RCCE_ERROR_BASE + 24)
#define RCCE_ERROR_MALLOC (RCCE_ERROR_BASE + 25)
#define RCCE_ERROR_COMM_INITIALIZED (RCCE_ERROR_BASE + 26)
#define RCCE_ERROR_CORE_NOT_IN_HOSTFILE (RCCE_ERROR_BASE + 27)
#define RCCE_MAX_ERROR_STRING 45

// Debug-category selectors for RCCE_debug_set/RCCE_debug_unset.
#define RCCE_DEBUG_ALL 111111
#define RCCE_DEBUG_SYNCH 111444
#define RCCE_DEBUG_COMM 111555
#define RCCE_DEBUG_RPC 111666
#define RCCE_DEBUG_DEBUG 111888

// Synchronization-flag states.
#define RCCE_FLAG_SET 1
#define RCCE_FLAG_UNSET 0

// Reduction operations for RCCE_allreduce/RCCE_reduce.
#define RCCE_NUM_OPS 4
#define RCCE_OP_BASE 23232323
#define RCCE_SUM (RCCE_OP_BASE)
#define RCCE_MIN (RCCE_OP_BASE+1)
#define RCCE_MAX (RCCE_OP_BASE+2)
#define RCCE_PROD (RCCE_OP_BASE+3)

// Element datatypes for the reduction routines.
#define RCCE_TYPE_BASE 63636363
#define RCCE_INT (RCCE_TYPE_BASE)
#define RCCE_LONG (RCCE_TYPE_BASE+1)
#define RCCE_FLOAT (RCCE_TYPE_BASE+2)
#define RCCE_DOUBLE (RCCE_TYPE_BASE+3)

// MPB pointer type
typedef volatile unsigned char* t_vcharp;

#ifdef SINGLEBITFLAGS
typedef struct {
  int location;          /* location of bit within line (0-255) */
  t_vcharp line_address; /* start of cache line containing flag */
} RCCE_FLAG;
#else
typedef volatile int *RCCE_FLAG;
#endif

typedef int RCCE_FLAG_STATUS;

// Communicator: a subset of UEs plus the two flags used by the barrier.
typedef struct {
  int size;
  int my_rank;
  int initialized;
  int member[RCCE_MAXNP];
  RCCE_FLAG gather;
  RCCE_FLAG release;
} RCCE_COMM;

#ifdef RC_POWER_MANAGEMENT
// State of an in-flight voltage/frequency change request.
typedef struct{
  int release;
  int old_voltage_level;
  int new_voltage_level;
  int old_frequency_divider;
  int new_frequency_divider;
  long long start_cycle;
} RCCE_REQUEST;

int RCCE_power_domain(void);
int RCCE_iset_power(int, RCCE_REQUEST *, int *, int *);
int RCCE_wait_power(RCCE_REQUEST *);
int RCCE_set_frequency_divider(int, int *);
int RCCE_power_domain_master(void);
int RCCE_power_domain_size(void);
#endif

// Library setup/teardown and identity.
int RCCE_init(int *, char***);
int RCCE_finalize(void);
double RCCE_wtime(void);
int RCCE_ue(void);
int RCCE_num_ues(void);

#ifdef GORY
// "Gory" low-level interface: explicit MPB management and flag handling.
t_vcharp RCCE_malloc(size_t);
t_vcharp RCCE_malloc_request(size_t, size_t *);
void RCCE_free(t_vcharp);
int RCCE_put(t_vcharp, t_vcharp, int, int);
int RCCE_get(t_vcharp, t_vcharp, int, int);
int RCCE_wait_until(RCCE_FLAG, RCCE_FLAG_STATUS);
int RCCE_flag_alloc(RCCE_FLAG *);
int RCCE_flag_free(RCCE_FLAG *);
int RCCE_flag_write(RCCE_FLAG *, RCCE_FLAG_STATUS, int);
int RCCE_flag_read(RCCE_FLAG, RCCE_FLAG_STATUS *, int);
int RCCE_send(char *, t_vcharp, size_t, RCCE_FLAG *, RCCE_FLAG *, size_t, int);
int RCCE_recv(char *, t_vcharp, size_t, RCCE_FLAG *, RCCE_FLAG *, size_t, int);
int RCCE_recv_test(char *, t_vcharp, size_t, RCCE_FLAG *, RCCE_FLAG *, size_t, int, int *);
#else
// Simplified high-level interface.
int RCCE_send(char *, size_t, int);
int RCCE_recv(char *, size_t, int);
int RCCE_recv_test(char *, size_t, int, int *);
int RCCE_allreduce(char *, char *, int, int, int, RCCE_COMM);
int RCCE_reduce(char *, char *, int, int, int, int, RCCE_COMM);
int RCCE_bcast(char *, size_t, int, RCCE_COMM);
#endif

// Communicator management and synchronization.
int RCCE_comm_split(int (*)(int, void *), void *, RCCE_COMM *);
int RCCE_comm_free(RCCE_COMM *);
int RCCE_comm_size(RCCE_COMM, int *);
int RCCE_comm_rank(RCCE_COMM, int *);
void RCCE_fence(void);
int RCCE_barrier(RCCE_COMM *);
int RCCE_error_string(int, char *, int *);
int RCCE_debug_set(int);
int RCCE_debug_unset(int);

extern RCCE_COMM RCCE_COMM_WORLD;

#ifdef RC_POWER_MANAGEMENT
extern RCCE_COMM RCCE_P_COMM;
#define RCCE_POWER_DEFAULT -99999
#endif

// Under the OpenMP emulation each thread is a UE, so the global
// communicators must be per-thread.
#ifdef _OPENMP
#pragma omp threadprivate (RCCE_COMM_WORLD)
#ifdef RC_POWER_MANAGEMENT
#pragma omp threadprivate (RCCE_P_COMM)
#endif
#endif

#endif
chlpca.h
/* # # File : chlpca.cpp # ( C++ source file ) # # Description : Example of use for the CImg plugin 'plugins/chlpca.h'. # This file is a part of the CImg Library project. # ( http://cimg.eu ) # # Copyright : Jerome Boulanger # ( http://www.irisa.fr/vista/Equipe/People/Jerome.Boulanger.html ) # # # License : CeCILL v2.0 # ( http://www.cecill.info/licences/Licence_CeCILL_V2-en.html ) # # This software is governed by the CeCILL license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/ or redistribute the software under the terms of the CeCILL # license as circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. # # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL license and that you accept its terms. # */ #ifndef cimg_plugin_chlpca #define cimg_plugin_chlpca // Define some useful macros. //! 
Some loops #define cimg_for_step1(bound,i,step) for (int i = 0; i<(int)(bound); i+=step) #define cimg_for_stepX(img,x,step) cimg_for_step1((img)._width,x,step) #define cimg_for_stepY(img,y,step) cimg_for_step1((img)._height,y,step) #define cimg_for_stepZ(img,z,step) cimg_for_step1((img)._depth,z,step) #define cimg_for_stepXY(img,x,y,step) cimg_for_stepY(img,y,step) cimg_for_stepX(img,x,step) #define cimg_for_stepXYZ(img,x,y,step) cimg_for_stepZ(img,z,step) cimg_for_stepY(img,y,step) cimg_for_stepX(img,x,step) //! Loop for point J(xj,yj) in the neighborhood of a point I(xi,yi) of size (2*rx+1,2*ry+1) /** Point J is kept inside the boundaries of the image img. example of summing the pixels values in a neighborhood 11x11 cimg_forXY(img,xi,yi) cimg_for_windowXY(img,xi,yi,xj,yj,5,5) dest(yi,yi) += src(xj,yj); **/ #define cimg_forXY_window(img,xi,yi,xj,yj,rx,ry) \ for (int yi0=cimg::max(0,yi-ry), yi1=cimg::min(yi + ry,(int)img.height() - 1), yj=yi0;yj<=yi1;++yj) \ for (int xi0=cimg::max(0,xi-rx), xi1=cimg::min(xi + rx,(int)img.width() - 1), xj=xi0;xj<=xi1;++xj) #define cimg_forXYZ_window(img,xi,yi,zi,xj,yj,zj,rx,ry,rz) \ for (int zi0=cimg::max(0,zi-rz), zi1=cimg::min(zi + rz,(int)img.depth() - 1) , zj=zi0;zj<=zi1;++zj) \ for (int yi0=cimg::max(0,yi-ry), yi1=cimg::min(yi + ry,(int)img.height() - 1), yj=yi0;yj<=yi1;++yj) \ for (int xi0=cimg::max(0,xi-rx), xi1=cimg::min(xi + rx,(int)img.width() - 1) , xj=xi0;xj<=xi1;++xj) //! 
Crop a patch in the image around position x,y,z and return a column vector /** \param x x-coordinate of the center of the patch \param y y-coordinate of the center of the patch \param z z-coordinate of the center of the patch \param px the patch half width \param px the patch half height \param px the patch half depth \return img.get_crop(x0,y0,z0,x1,y1,z1).unroll('y'); **/ CImg<T> get_patch(int x, int y, int z, int px, int py, int pz) const { if (depth() == 1){ const int x0 = x - px, y0 = y - py, x1 = x + px, y1 = y + py; return get_crop(x0, y0, x1, y1).unroll('y'); } else { const int x0 = x - px, y0 = y - py, z0 = z - pz, x1 = x + px, y1 = y + py, z1 = z + pz; return get_crop(x0, y0, z0, x1, y1, z1).unroll('y'); } } //! Extract a local patch dictionnary around point xi,yi,zi CImg<T> get_patch_dictionnary(const int xi, const int yi, const int zi, const int px, const int py, const int pz, const int wx, const int wy, const int wz, int & idc) const { const int n = (2*wx + 1) * (2*wy + 1) * (2 * (depth()==1?0:wz) + 1), d = (2*px + 1) * (2*py + 1) * (2 * (depth()==1?0:px) + 1) * spectrum(); CImg<> S(n, d); int idx = 0; if (depth() == 1) { cimg_forXY_window((*this), xi, yi, xj, yj, wx, wy){ CImg<T> patch = get_patch(xj, yj, 0, px, py, 1); cimg_forY(S,y) S(idx,y) = patch(y); if (xj==xi && yj==yi) idc = idx; idx++; } } else { cimg_forXYZ_window((*this), xi,yi,zi,xj,yj,zj,wx,wy,wz){ CImg<T> patch = get_patch(xj, yj, zj, px, py, pz); cimg_forY(S,y) S(idx,y) = patch(y); if (xj==xi && yj==yi && zj==zi) idc = idx; idx++; } } S.columns(0, idx - 1); return S; } //! 
Add a patch to the image
/**
   Draws (adds, via opacity -1) the given unrolled patch back into the image.
   \param xi x-coordinate of the center of the patch
   \param yi y-coordinate of the center of the patch
   \param zi z-coordinate of the center of the patch
   \param patch the patch as a 1D column vector
   \param px the patch half width
   \param py the patch half height
   \param pz the patch half depth (ignored for 2D images)
**/
CImg<T> & add_patch(const int xi, const int yi, const int zi,
                    const CImg<T> & patch,
                    const int px, const int py, const int pz) {
  const int
    x0 = xi - px, y0 = yi - py,
    z0 = (depth() == 1 ? 0 : zi - pz),
    sx = 2 * px + 1, sy = 2 * py + 1,
    sz = (depth() == 1 ? 1 : 2 * pz +1);
  // Reshape the column vector back to patch geometry; opacity -1 means the
  // drawn values are accumulated (added) instead of replacing the pixels.
  draw_image(x0, y0, z0, 0, patch.get_resize(sx, sy, sz, spectrum(), -1), -1);
  return (*this);
}

//! Add a constant patch to the image
/**
   Adds a constant value over the whole patch footprint (used e.g. to keep
   a per-pixel normalization count).
   \param xi x-coordinate of the center of the patch
   \param yi y-coordinate of the center of the patch
   \param zi z-coordinate of the center of the patch
   \param value value added inside the patch
   \param px the patch half width
   \param py the patch half height
   \param pz the patch half depth (ignored for 2D images)
**/
CImg<T> & add_patch(const int xi, const int yi, const int zi,
                    const T value,
                    const int px, const int py, const int pz) {
  const int
    x0 = xi - px, y0 = yi - py,
    z0 = (depth() == 1 ? 0 : zi - pz),
    x1 = xi + px, y1 = yi + py,
    z1 = (depth() == 1 ? 0 : zi + pz);
  // Opacity -1: accumulate the value over every channel of the rectangle.
  draw_rectangle(x0, y0, z0, 0, x1, y1, z1, spectrum()-1, value, -1);
  return (*this);
}

//!
CHLPCA denoising from the PhD thesis of Hu Haijuan
/**
   \param px the patch half width
   \param py the patch half height
   \param pz the patch half depth
   \param wx the training region half width
   \param wy the training region half height
   \param wz the training region half depth
   \param nstep the subsampling of the image domain
   \param nsim the number of patches used for training as a factor of the patch size
   \param lambda_min the threshold on the eigen values of the PCA for dimension reduction
   \param threshold the threshold on the value of the coefficients
   \param noise_std the noise standard deviation (estimated from the image if negative)
   \param pca_use_svd if true use the svd approach to perform the pca
          otherwise use the covariance method
   \note please cite the PhD thesis of Hu Haijuan
   http://www.univ-ubs.fr/soutenance-de-these-hu-haijuan-337653.kjsp?RH=1318498222799
**/
CImg<T> get_chlpca(const int px, const int py, const int pz,
                   const int wx, const int wy, const int wz,
                   const int nstep, const float nsim,
                   const float lambda_min, const float threshold,
                   const float noise_std, const bool pca_use_svd) const {
  // nd: number of values per patch; K: how many similar patches to train on.
  const int
    nd = (2*px + 1) * (2*py + 1) * (depth()==1?1:2*pz + 1) * spectrum(),
    K = (int)(nsim * nd);
#ifdef DEBUG
  fprintf(stderr,"chlpca: p:%dx%dx%d,w:%dx%dx%d,nd:%d,K:%d\n",
          2*px + 1,2*py + 1,2*pz + 1,2*wx + 1,2*wy + 1,2*wz + 1,nd,K);
#endif
  // Noise level: estimate it from the image unless the caller supplied one.
  float sigma;
  if (noise_std<0) sigma = (float)std::sqrt(variance_noise());
  else sigma = noise_std;
  // dest accumulates reconstructed patches, count the per-pixel overlap.
  CImg<T> dest(*this), count(*this);
  dest.fill(0);
  count.fill(0);
  cimg_for_stepZ(*this,zi,(depth()==1||pz==0)?1:nstep){
#ifdef cimg_use_openmp
#pragma omp parallel for
#endif
    cimg_for_stepXY((*this),xi,yi,nstep){
      // extract the training region X
      int idc = 0;
      CImg<T> S = get_patch_dictionnary(xi,yi,zi,px,py,pz,wx,wy,wz,idc);
      // select the K most similar patches within the training set
      // (similarity = MSE against the central patch, ascending sort)
      CImg<T> Sk(S);
      CImg<unsigned int> index(S.width());
      if (K < Sk.width() - 1){
        CImg<T> mse(S.width());
        CImg<unsigned int> perms;
        cimg_forX(S,x) { mse(x) = (T)S.get_column(idc).MSE(S.get_column(x)); }
        mse.sort(perms,true);
        // index maps the original column position to its similarity rank,
        // so the reconstruction loop below can find each patch again.
        cimg_foroff(perms,i) {
          cimg_forY(S,j) Sk(i,j) = S(perms(i),j);
          index(perms(i)) = i;
        }
        Sk.columns(0, K);
        perms.threshold(K);
      } else {
        cimg_foroff(index,i) index(i)=i;
      }
      // centering the patches (subtract the mean patch M)
      CImg<T> M(1, Sk.height(), 1, 1, 0);
      cimg_forXY(Sk,x,y) { M(y) += Sk(x,y); }
      M /= (T)Sk.width();
      cimg_forXY(Sk,x,y) { Sk(x,y) -= M(y); }
      // compute the principal component of the training set S
      CImg<T> P, lambda;
      if (pca_use_svd) {
        CImg<T> V;
        Sk.get_transpose().SVD(V,lambda,P,true,100);
      } else {
        // Covariance route: eigen values are squared singular values.
        (Sk * Sk.get_transpose()).symmetric_eigen(lambda, P);
        lambda.sqrt();
      }
      // dimension reduction: keep components whose singular value exceeds
      // sqrt(K)*lambda_min*sigma (at least one component is always kept)
      int s = 0;
      const T tx = (T)(std::sqrt((double)Sk.width()-1.0) * lambda_min * sigma);
      while((lambda(s) > tx) && (s < ((int)lambda.size() - 1))) { s++; }
      P.columns(0,s);
      // project all the patches on the basis (compute scalar product)
      Sk = P.get_transpose() * Sk;
      // threshold the coefficients
      if (threshold > 0) { Sk.threshold(threshold, 1); }
      // project back to pixel space
      Sk = P * Sk;
      // recenter the patches
      cimg_forXY(Sk,x,y) { Sk(x,y) += M(y); }
      // Splat every retained (rank < Sk.width()) denoised patch back into
      // dest, accumulating overlap counts for later normalization.
      int j = 0;
      cimg_forXYZ_window((*this),xi,yi,zi,xj,yj,zj,wx,wy,wz){
        const int id = index(j);
        if (id < Sk.width()) {
          dest.add_patch(xj, yj, zj, Sk.get_column(id), px, py, pz);
          count.add_patch(xj, yj, zj, (T)1, px, py, pz);
        }
        j++;
      }
    }
  }
  // Normalize by the overlap count; pixels never covered by any patch
  // keep their original value.
  cimg_foroff(dest, i) {
    if(count(i) != 0) { dest(i) /= count(i); }
    else { dest(i) = (*this)(i); }
  }
  return dest;
}

//!
CHLPCA denoising from the PhD thesis of Hu Haijuan (in-place variant)
/**
   In-place counterpart of get_chlpca() with the full parameter list.
   \param px the patch half width
   \param py the patch half height
   \param pz the patch half depth
   \param wx the training region half width
   \param wy the training region half height
   \param wz the training region half depth
   \param nstep the subsampling of the image domain
   \param nsim the number of patches used for training as a factor of the patch size
   \param lambda_min the threshold on the eigen values of the PCA for dimension reduction
   \param threshold the threshold on the value of the coefficients
   \param noise_std the noise standard deviation (estimated if negative)
   \param pca_use_svd if true use the svd approach to perform the pca
          otherwise use the covariance method
   \note please cite the PhD thesis of Hu Haijuan
   http://www.univ-ubs.fr/soutenance-de-these-hu-haijuan-337653.kjsp?RH=1318498222799
**/
CImg<T> & chlpca(const int px, const int py, const int pz,
                 const int wx, const int wy, const int wz,
                 const int nstep, const float nsim,
                 const float lambda_min, const float threshold,
                 const float noise_std, const bool pca_use_svd) {
  (*this) = get_chlpca(px, py, pz, wx, wy, wz, nstep, nsim,
                       lambda_min, threshold, noise_std, pca_use_svd);
  return (*this);
}

//!
CHLPCA denoising from the PhD thesis of Hu Haijuan
/**
   Convenience overload with isotropic patch/window sizes and defaults.
   \param p the patch half size
   \param w the training region half size
   \param nstep the subsampling of the image domain
   \param nsim the number of patches used for training as a factor of the patch size
   \param lambda_min the threshold on the eigen values of the PCA for dimension reduction
   \param threshold the threshold on the value of the coefficients
   \param noise_std the noise standard deviation (estimated if negative)
   \param pca_use_svd if true use the svd approach to perform the pca
          otherwise use the covariance method
   \note please cite the PhD thesis of Hu Haijuan
   http://www.univ-ubs.fr/soutenance-de-these-hu-haijuan-337653.kjsp?RH=1318498222799
**/
CImg<T> get_chlpca(const int p=3, const int w=10, const int nstep=5,
                   const float nsim=10, const float lambda_min=2,
                   const float threshold = -1, const float noise_std=-1,
                   const bool pca_use_svd=true) const {
  // 2D images use a zero depth half-size so no z-neighborhood is scanned.
  if (depth()==1) return get_chlpca(p, p, 0, w, w, 0, nstep, nsim, lambda_min,
                                    threshold, noise_std, pca_use_svd);
  else return get_chlpca(p, p, p, w, w, w, nstep, nsim, lambda_min,
                         threshold, noise_std, pca_use_svd);
}

// In-place convenience overload.
// NOTE(review): unlike the full-argument chlpca() above, this one returns
// CImg<T> by value (an extra copy) rather than CImg<T>& — presumably an
// oversight; confirm before changing the signature.
CImg<T> chlpca(const int p=3, const int w=10, const int nstep=5,
               const float nsim=10, const float lambda_min=2,
               const float threshold = -1, const float noise_std=-1,
               const bool pca_use_svd=true) {
  (*this) = get_chlpca(p, w, nstep, nsim, lambda_min, threshold,
                       noise_std, pca_use_svd);
  return (*this);
}

#endif /* cimg_plugin_chlpca */
HYPRE_IJMatrix.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * HYPRE_IJMatrix interface
 *
 *****************************************************************************/

#include "./_hypre_IJ_mv.h"
#include "../HYPRE.h"

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixCreate
 *
 * Allocates an IJ matrix object for the local row range [ilower, iupper]
 * and column range [jlower, jupper], and broadcasts the global extents
 * from the first and last ranks of 'comm'.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixCreate( MPI_Comm        comm,
                      HYPRE_BigInt    ilower,
                      HYPRE_BigInt    iupper,
                      HYPRE_BigInt    jlower,
                      HYPRE_BigInt    jupper,
                      HYPRE_IJMatrix *matrix )
{
   HYPRE_BigInt   *row_partitioning;
   HYPRE_BigInt   *col_partitioning;
   HYPRE_BigInt   *info;
   HYPRE_Int       num_procs;
   HYPRE_Int       myid;
   hypre_IJMatrix *ijmatrix;
   HYPRE_BigInt    row0, col0, rowN, colN;

   ijmatrix = hypre_CTAlloc(hypre_IJMatrix, 1, HYPRE_MEMORY_HOST);

   hypre_IJMatrixComm(ijmatrix)         = comm;
   hypre_IJMatrixObject(ijmatrix)       = NULL;
   hypre_IJMatrixTranslator(ijmatrix)   = NULL;
   hypre_IJMatrixAssumedPart(ijmatrix)  = NULL;
   hypre_IJMatrixObjectType(ijmatrix)   = HYPRE_UNITIALIZED;
   hypre_IJMatrixAssembleFlag(ijmatrix) = 0;
   hypre_IJMatrixPrintLevel(ijmatrix)   = 0;
   hypre_IJMatrixOMPFlag(ijmatrix)      = 0;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &myid);

   /* An empty local range is expressed as ilower == iupper+1, hence the
    * "+1" in the checks below; each early return frees the object. */
   if (ilower > iupper+1 || ilower < 0)
   {
      hypre_error_in_arg(2);
      hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
      return hypre_error_flag;
   }

   if (iupper < -1)
   {
      hypre_error_in_arg(3);
      hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
      return hypre_error_flag;
   }

   if (jlower > jupper+1 || jlower < 0)
   {
      hypre_error_in_arg(4);
      hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
      return hypre_error_flag;
   }

   if (jupper < -1)
   {
      hypre_error_in_arg(5);
      hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
      return hypre_error_flag;
   }

   info             = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
   row_partitioning = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
   col_partitioning = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);

   row_partitioning[0] = ilower;
   row_partitioning[1] = iupper+1;
   col_partitioning[0] = jlower;
   col_partitioning[1] = jupper+1;

   /* now we need the global number of rows and columns as well as the
      global first row and column index */

   /* proc 0 has the first row and col */
   if (myid == 0)
   {
      info[0] = ilower;
      info[1] = jlower;
   }
   hypre_MPI_Bcast(info, 2, HYPRE_MPI_BIG_INT, 0, comm);
   row0 = info[0];
   col0 = info[1];

   /* proc (num_procs-1) has the last row and col */
   if (myid == (num_procs-1))
   {
      info[0] = iupper;
      info[1] = jupper;
   }
   hypre_MPI_Bcast(info, 2, HYPRE_MPI_BIG_INT, num_procs-1, comm);
   rowN = info[0];
   colN = info[1];

   hypre_IJMatrixGlobalFirstRow(ijmatrix) = row0;
   hypre_IJMatrixGlobalFirstCol(ijmatrix) = col0;
   hypre_IJMatrixGlobalNumRows(ijmatrix)  = rowN - row0 + 1;
   hypre_IJMatrixGlobalNumCols(ijmatrix)  = colN - col0 + 1;

   hypre_TFree(info, HYPRE_MEMORY_HOST);

   hypre_IJMatrixRowPartitioning(ijmatrix) = row_partitioning;
   hypre_IJMatrixColPartitioning(ijmatrix) = col_partitioning;

   *matrix = (HYPRE_IJMatrix) ijmatrix;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixDestroy
 *
 * Frees the partitionings, the assumed partition, the underlying ParCSR
 * object (if any), and the IJ wrapper itself.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixDestroy( HYPRE_IJMatrix matrix )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Fixed: a redundant "if (ijmatrix)" wrapper was removed (the NULL case
    * already returned above). */
   if (hypre_IJMatrixRowPartitioning(ijmatrix) ==
       hypre_IJMatrixColPartitioning(ijmatrix))
   {
      /* Square matrices share one partitioning array; free it only once. */
      hypre_TFree(hypre_IJMatrixRowPartitioning(ijmatrix), HYPRE_MEMORY_HOST);
   }
   else
   {
      hypre_TFree(hypre_IJMatrixRowPartitioning(ijmatrix), HYPRE_MEMORY_HOST);
      hypre_TFree(hypre_IJMatrixColPartitioning(ijmatrix), HYPRE_MEMORY_HOST);
   }

   /* Fixed: the condition was written "if hypre_IJMatrixAssumedPart(...)"
    * without parentheses, which does not compile in C. */
   if (hypre_IJMatrixAssumedPart(ijmatrix))
   {
      hypre_AssumedPartitionDestroy((hypre_IJAssumedPart*)hypre_IJMatrixAssumedPart(ijmatrix));
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
   {
      hypre_IJMatrixDestroyParCSR( ijmatrix );
   }
   else if ( hypre_IJMatrixObjectType(ijmatrix) != -1 )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixInitialize
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixInitialize( HYPRE_IJMatrix matrix )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
   {
      hypre_IJMatrixInitializeParCSR( ijmatrix ) ;
   }
   else
   {
      hypre_error_in_arg(1);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixInitialize_v2
 *
 * Same as HYPRE_IJMatrixInitialize, with an explicit memory location.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixInitialize_v2( HYPRE_IJMatrix matrix,
                             HYPRE_MemoryLocation memory_location )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
   {
      hypre_IJMatrixInitializeParCSR_v2( ijmatrix, memory_location ) ;
   }
   else
   {
      hypre_error_in_arg(1);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixSetPrintLevel
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixSetPrintLevel( HYPRE_IJMatrix matrix,
                             HYPRE_Int print_level )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Fixed: the requested level was previously discarded and the field was
    * hard-coded to 1; store the caller's value (any positive value still
    * enables printing). */
   hypre_IJMatrixPrintLevel(ijmatrix) = print_level;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * This is a helper routine to compute a prefix sum of integer values.
 *
 * The current implementation is okay for modest numbers of threads.
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_PrefixSumInt(HYPRE_Int nvals, HYPRE_Int *vals, HYPRE_Int *sums) { HYPRE_Int j, nthreads, bsize; nthreads = hypre_NumThreads(); bsize = (nvals + nthreads - 1) / nthreads; /* This distributes the remainder */ if (nvals < nthreads || bsize == 1) { sums[0] = 0; for (j=1; j < nvals; j++) sums[j] += sums[j-1] + vals[j-1]; } else { /* Compute preliminary partial sums (in parallel) within each interval */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < nvals; j += bsize) { HYPRE_Int i, n = hypre_min((j+bsize), nvals); sums[0] = 0; for (i = j+1; i < n; i++) { sums[i] = sums[i-1] + vals[i-1]; } } /* Compute final partial sums (in serial) for the first entry of every interval */ for (j = bsize; j < nvals; j += bsize) { sums[j] = sums[j-bsize] + sums[j-1] + vals[j-1]; } /* Compute final partial sums (in parallel) for the remaining entries */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE #endif for (j = bsize; j < nvals; j += bsize) { HYPRE_Int i, n = hypre_min((j+bsize), nvals); for (i = j+1; i < n; i++) { sums[i] += sums[j]; } } } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetValues( HYPRE_IJMatrix matrix, HYPRE_Int nrows, HYPRE_Int *ncols, const HYPRE_BigInt *rows, const HYPRE_BigInt *cols, const HYPRE_Complex *values ) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (nrows == 0) { return hypre_error_flag; } if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } /* if (!ncols) { hypre_error_in_arg(3); return hypre_error_flag; } */ if (!rows) { hypre_error_in_arg(4); return hypre_error_flag; } if (!cols) { hypre_error_in_arg(5); return hypre_error_flag; } if (!values) { hypre_error_in_arg(6); return 
hypre_error_flag; } if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR ) { hypre_error_in_arg(1); return hypre_error_flag; } HYPRE_IJMatrixSetValues2(matrix, nrows, ncols, rows, NULL, cols, values); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetValues2( HYPRE_IJMatrix matrix, HYPRE_Int nrows, HYPRE_Int *ncols, const HYPRE_BigInt *rows, const HYPRE_Int *row_indexes, const HYPRE_BigInt *cols, const HYPRE_Complex *values ) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (nrows == 0) { return hypre_error_flag; } if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (nrows < 0) { hypre_error_in_arg(2); return hypre_error_flag; } /* if (!ncols) { hypre_error_in_arg(3); return hypre_error_flag; } */ if (!rows) { hypre_error_in_arg(4); return hypre_error_flag; } if (!cols) { hypre_error_in_arg(6); return hypre_error_flag; } if (!values) { hypre_error_in_arg(7); return hypre_error_flag; } if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR ) { hypre_error_in_arg(1); return hypre_error_flag; } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_IJMatrixMemoryLocation(matrix) ); if (exec == HYPRE_EXEC_DEVICE) { hypre_IJMatrixSetAddValuesParCSRDevice(ijmatrix, nrows, ncols, rows, row_indexes, cols, values, "set"); } else #endif { HYPRE_Int *row_indexes_tmp = (HYPRE_Int *) row_indexes; HYPRE_Int *ncols_tmp = ncols; if (!ncols_tmp) { HYPRE_Int i; ncols_tmp = hypre_TAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST); for (i = 0; i < nrows; i++) { ncols_tmp[i] = 1; } } if (!row_indexes) { row_indexes_tmp = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST); hypre_PrefixSumInt(nrows, ncols_tmp, row_indexes_tmp); } if (hypre_IJMatrixOMPFlag(ijmatrix)) { hypre_IJMatrixSetValuesOMPParCSR(ijmatrix, nrows, ncols_tmp, rows, 
row_indexes_tmp, cols, values); } else { hypre_IJMatrixSetValuesParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values); } if (!ncols) { hypre_TFree(ncols_tmp, HYPRE_MEMORY_HOST); } if (!row_indexes) { hypre_TFree(row_indexes_tmp, HYPRE_MEMORY_HOST); } } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetConstantValues( HYPRE_IJMatrix matrix, HYPRE_Complex value) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR ) { return( hypre_IJMatrixSetConstantValuesParCSR( ijmatrix, value)); } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixAddToValues( HYPRE_IJMatrix matrix, HYPRE_Int nrows, HYPRE_Int *ncols, const HYPRE_BigInt *rows, const HYPRE_BigInt *cols, const HYPRE_Complex *values ) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (nrows == 0) { return hypre_error_flag; } if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (nrows < 0) { hypre_error_in_arg(2); return hypre_error_flag; } /* if (!ncols) { hypre_error_in_arg(3); return hypre_error_flag; } */ if (!rows) { hypre_error_in_arg(4); return hypre_error_flag; } if (!cols) { hypre_error_in_arg(5); return hypre_error_flag; } if (!values) { hypre_error_in_arg(6); return hypre_error_flag; } if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR ) { hypre_error_in_arg(1); return hypre_error_flag; } HYPRE_IJMatrixAddToValues2(matrix, nrows, ncols, rows, NULL, cols, values); return hypre_error_flag; } /*-------------------------------------------------------------------------- 
*--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixAddToValues2( HYPRE_IJMatrix matrix, HYPRE_Int nrows, HYPRE_Int *ncols, const HYPRE_BigInt *rows, const HYPRE_Int *row_indexes, const HYPRE_BigInt *cols, const HYPRE_Complex *values ) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (nrows == 0) { return hypre_error_flag; } if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (nrows < 0) { hypre_error_in_arg(2); return hypre_error_flag; } /* if (!ncols) { hypre_error_in_arg(3); return hypre_error_flag; } */ if (!rows) { hypre_error_in_arg(4); return hypre_error_flag; } if (!cols) { hypre_error_in_arg(6); return hypre_error_flag; } if (!values) { hypre_error_in_arg(7); return hypre_error_flag; } if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR ) { hypre_error_in_arg(1); return hypre_error_flag; } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_IJMatrixMemoryLocation(matrix) ); if (exec == HYPRE_EXEC_DEVICE) { hypre_IJMatrixSetAddValuesParCSRDevice(ijmatrix, nrows, ncols, rows, row_indexes, cols, values, "add"); } else #endif { HYPRE_Int *row_indexes_tmp = (HYPRE_Int *) row_indexes; HYPRE_Int *ncols_tmp = ncols; if (!ncols_tmp) { HYPRE_Int i; ncols_tmp = hypre_TAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST); for (i = 0; i < nrows; i++) { ncols_tmp[i] = 1; } } if (!row_indexes) { row_indexes_tmp = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST); hypre_PrefixSumInt(nrows, ncols_tmp, row_indexes_tmp); } if (hypre_IJMatrixOMPFlag(ijmatrix)) { hypre_IJMatrixAddToValuesOMPParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values); } else { hypre_IJMatrixAddToValuesParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values); } if (!ncols) { hypre_TFree(ncols_tmp, HYPRE_MEMORY_HOST); } if (!row_indexes) { hypre_TFree(row_indexes_tmp, HYPRE_MEMORY_HOST); } } return hypre_error_flag; } 
/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixAssemble
 * Finalizes the matrix after all Set/AddTo calls; required before the
 * matrix can be used.  ParCSR object type only; dispatches to the device
 * assembly when the matrix memory lives on the device.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixAssemble( HYPRE_IJMatrix matrix )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; }

   if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
   {
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      HYPRE_ExecutionPolicy exec =
         hypre_GetExecPolicy1( hypre_IJMatrixMemoryLocation(matrix) );

      if (exec == HYPRE_EXEC_DEVICE)
      {
         return( hypre_IJMatrixAssembleParCSRDevice( ijmatrix ) );
      }
      else
#endif
      {
         return( hypre_IJMatrixAssembleParCSR( ijmatrix ) );
      }
   }
   else
   {
      hypre_error_in_arg(1);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixGetRowCounts
 * For each of the nrows global row indices in rows[], writes the number of
 * nonzeros of that row into ncols[].  ParCSR only.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixGetRowCounts( HYPRE_IJMatrix matrix,
                            HYPRE_Int      nrows,
                            HYPRE_BigInt  *rows,
                            HYPRE_Int     *ncols )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (nrows == 0) { return hypre_error_flag; }

   if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; }
   if (nrows < 0) { hypre_error_in_arg(2); return hypre_error_flag; }
   if (!rows) { hypre_error_in_arg(3); return hypre_error_flag; }
   if (!ncols) { hypre_error_in_arg(4); return hypre_error_flag; }

   if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
   {
      hypre_IJMatrixGetRowCountsParCSR( ijmatrix, nrows, rows, ncols );
   }
   else
   {
      hypre_error_in_arg(1);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixGetValues
 * Gathers matrix coefficients at (rows[i], cols[k]) positions into values.
 * ParCSR only.  NOTE(review): unlike Set/AddToValues, there is no
 * nrows < 0 guard here -- presumably intentional, but worth confirming
 * against the rest of the IJ interface.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixGetValues( HYPRE_IJMatrix matrix,
                         HYPRE_Int      nrows,
                         HYPRE_Int     *ncols,
                         HYPRE_BigInt  *rows,
                         HYPRE_BigInt  *cols,
                         HYPRE_Complex *values )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (nrows == 0) { return hypre_error_flag; }

   if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; }
   if (!ncols) { hypre_error_in_arg(3); return hypre_error_flag; }
   if (!rows) { hypre_error_in_arg(4); return hypre_error_flag; }
   if (!cols) { hypre_error_in_arg(5); return hypre_error_flag; }
   if (!values) { hypre_error_in_arg(6); return hypre_error_flag; }

   if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
   {
      hypre_IJMatrixGetValuesParCSR( ijmatrix, nrows, ncols,
                                     rows, cols, values );
   }
   else
   {
      hypre_error_in_arg(1);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixSetObjectType
 * Records the underlying storage type (e.g. HYPRE_PARCSR).  Must be set
 * before the matrix is initialized.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixSetObjectType( HYPRE_IJMatrix matrix,
                             HYPRE_Int      type )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; }

   hypre_IJMatrixObjectType(ijmatrix) = type;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixGetObjectType
 * Retrieves the storage type previously set with SetObjectType.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixGetObjectType( HYPRE_IJMatrix matrix,
                             HYPRE_Int     *type )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; }

   *type = hypre_IJMatrixObjectType(ijmatrix);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixGetLocalRange
 * Returns this rank's inclusive global row range [*ilower, *iupper] and
 * column range [*jlower, *jupper].  Partitioning arrays store
 * [first, one-past-last), hence the -1 on the upper bounds.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixGetLocalRange( HYPRE_IJMatrix  matrix,
                             HYPRE_BigInt   *ilower,
                             HYPRE_BigInt   *iupper,
                             HYPRE_BigInt   *jlower,
                             HYPRE_BigInt   *jupper )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;
   MPI_Comm comm;
   HYPRE_BigInt *row_partitioning;
   HYPRE_BigInt *col_partitioning;
   HYPRE_Int my_id;

   if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; }

   comm = hypre_IJMatrixComm(ijmatrix);
   row_partitioning = hypre_IJMatrixRowPartitioning(ijmatrix);
   col_partitioning = hypre_IJMatrixColPartitioning(ijmatrix);

   /* NOTE(review): my_id is queried but not used below; kept as-is. */
   hypre_MPI_Comm_rank(comm, &my_id);

   *ilower = row_partitioning[0];
   *iupper = row_partitioning[1]-1;
   *jlower = col_partitioning[0];
   *jupper = col_partitioning[1]-1;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 *--------------------------------------------------------------------------*/
/**
Returns a pointer to an underlying ijmatrix type used to implement IJMatrix.
Assumes that the implementation has an underlying matrix, so it would not
work with a direct implementation of IJMatrix.

@return integer error code
@param IJMatrix [IN]
The ijmatrix to be pointed to.
*/

HYPRE_Int
HYPRE_IJMatrixGetObject( HYPRE_IJMatrix   matrix,
                         void           **object )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; }

   *object = hypre_IJMatrixObject( ijmatrix );

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixSetRowSizes
 * Hint: expected number of nonzeros per local row, used to preallocate
 * storage before Set/AddToValues.  ParCSR only.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixSetRowSizes( HYPRE_IJMatrix   matrix,
                           const HYPRE_Int *sizes )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; }

   if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
   {
      return( hypre_IJMatrixSetRowSizesParCSR( ijmatrix , sizes ) );
   }
   else
   {
      hypre_error_in_arg(1);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixSetDiagOffdSizes
 * Finer-grained preallocation hint: per-row nonzero counts split into the
 * local (diag) block and the off-processor (offd) block.  ParCSR only.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixSetDiagOffdSizes( HYPRE_IJMatrix   matrix,
                                const HYPRE_Int *diag_sizes,
                                const HYPRE_Int *offdiag_sizes )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; }

   if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
   {
      hypre_IJMatrixSetDiagOffdSizesParCSR( ijmatrix, diag_sizes,
                                            offdiag_sizes );
   }
   else
   {
      hypre_error_in_arg(1);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixSetMaxOffProcElmts
 * Hint: maximum number of elements this rank will set/add on rows owned by
 * other ranks, used to size communication buffers.  ParCSR only.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixSetMaxOffProcElmts( HYPRE_IJMatrix matrix,
                                  HYPRE_Int      max_off_proc_elmts)
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; }

   if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
   {
      return( hypre_IJMatrixSetMaxOffProcElmtsParCSR(ijmatrix,
                                                     max_off_proc_elmts) );
   }
   else
   {
      hypre_error_in_arg(1);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixRead
 * create IJMatrix on host memory
 *
 * Reads one file per rank ("<filename>.NNNNN").  First line: the local
 * row/column range; remaining lines: "I J value" triples.  Entries whose
 * row falls outside this rank's range are routed through AddToValues (so
 * off-processor contributions accumulate); local entries use SetValues.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixRead( const char     *filename,
                    MPI_Comm        comm,
                    HYPRE_Int       type,
                    HYPRE_IJMatrix *matrix_ptr )
{
   HYPRE_IJMatrix  matrix;
   HYPRE_BigInt    ilower, iupper, jlower, jupper;
   HYPRE_BigInt    I, J;
   HYPRE_Int       ncols;
   HYPRE_Complex   value;
   HYPRE_Int       myid, ret;
   char            new_filename[255];
   FILE           *file;

   hypre_MPI_Comm_rank(comm, &myid);

   /* Per-rank file name; NOTE(review): fixed 255-byte buffer, long
      filenames would overflow -- assumed short in practice. */
   hypre_sprintf(new_filename,"%s.%05d", filename, myid);

   if ((file = fopen(new_filename, "r")) == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_fscanf(file, "%b %b %b %b", &ilower, &iupper, &jlower, &jupper);
   HYPRE_IJMatrixCreate(comm, ilower, iupper, jlower, jupper, &matrix);

   HYPRE_IJMatrixSetObjectType(matrix, type);
   HYPRE_IJMatrixInitialize_v2(matrix, HYPRE_MEMORY_HOST);

   /* It is important to ensure that whitespace follows the index value to help
    * catch mistakes in the input file.  See comments in IJVectorRead().
    */
   ncols = 1;
   while ( (ret = hypre_fscanf(file, "%b %b%*[ \t]%le", &I, &J, &value)) != EOF )
   {
      if (ret != 3)
      {
         hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                           "Error in IJ matrix input file.");
         return hypre_error_flag;
      }
      if (I < ilower || I > iupper)
      {
         /* Off-processor row: accumulate, queued for exchange at assembly. */
         HYPRE_IJMatrixAddToValues(matrix, 1, &ncols, &I, &J, &value);
      }
      else
      {
         HYPRE_IJMatrixSetValues(matrix, 1, &ncols, &I, &J, &value);
      }
   }

   HYPRE_IJMatrixAssemble(matrix);

   fclose(file);

   *matrix_ptr = matrix;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixPrint
 * Writes the assembled ParCSR matrix in IJ format (one file per rank).
 * If the matrix lives in device memory, a temporary host clone is printed
 * and destroyed.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixPrint( HYPRE_IJMatrix  matrix,
                     const char     *filename )
{
   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if ( (hypre_IJMatrixObjectType(matrix) != HYPRE_PARCSR) )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   void *object;
   HYPRE_IJMatrixGetObject(matrix, &object);
   HYPRE_ParCSRMatrix par_csr = (HYPRE_ParCSRMatrix) object;

   HYPRE_MemoryLocation memory_location = hypre_IJMatrixMemoryLocation(matrix);

   if ( hypre_GetActualMemLocation(memory_location) == hypre_MEMORY_HOST )
   {
      hypre_ParCSRMatrixPrintIJ(par_csr, 0, 0, filename);
   }
   else
   {
      /* Clone to host (deep copy), print, then free the clone. */
      HYPRE_ParCSRMatrix par_csr2 =
         hypre_ParCSRMatrixClone_v2(par_csr, 1, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixPrintIJ(par_csr2, 0, 0, filename);
      hypre_ParCSRMatrixDestroy(par_csr2);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixSetOMPFlag
 * Enables (nonzero) or disables (zero) the OpenMP code paths used by
 * SetValues/AddToValues.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixSetOMPFlag( HYPRE_IJMatrix matrix,
                          HYPRE_Int      omp_flag )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; }

   hypre_IJMatrixOMPFlag(ijmatrix) = omp_flag;

   return hypre_error_flag;
}
// ===== TSDFVoxelGridImpl.h (second concatenated source file begins here) =====
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. 
// ----------------------------------------------------------------------------

#include <atomic>
#include <cmath>

#include "open3d/core/Dispatch.h"
#include "open3d/core/Dtype.h"
#include "open3d/core/MemoryManager.h"
#include "open3d/core/SizeVector.h"
#include "open3d/core/Tensor.h"
#include "open3d/t/geometry/Utility.h"
#include "open3d/t/geometry/kernel/GeometryIndexer.h"
#include "open3d/t/geometry/kernel/GeometryMacros.h"
#include "open3d/t/geometry/kernel/TSDFVoxel.h"
#include "open3d/t/geometry/kernel/TSDFVoxelGrid.h"
#include "open3d/utility/Console.h"
#include "open3d/utility/Timer.h"

namespace open3d {
namespace t {
namespace geometry {
namespace kernel {
namespace tsdf {

// Integrates one depth (and optionally color) frame into the TSDF voxel
// block grid.  This translation unit is compiled twice: under nvcc
// (__CUDACC__) it defines IntegrateCUDA, otherwise IntegrateCPU -- the
// body is shared and only the launcher differs.
#if defined(__CUDACC__)
void IntegrateCUDA
#else
void IntegrateCPU
#endif
        (const core::Tensor& depth,
         const core::Tensor& color,
         const core::Tensor& indices,
         const core::Tensor& block_keys,
         core::Tensor& block_values,
         // Transforms
         const core::Tensor& intrinsics,
         const core::Tensor& extrinsics,
         // Parameters
         int64_t resolution,
         float voxel_size,
         float sdf_trunc,
         float depth_scale,
         float depth_max) {
    // Parameters
    int64_t resolution3 = resolution * resolution * resolution;

    // Shape / transform indexers, no data involved
    NDArrayIndexer voxel_indexer({resolution, resolution, resolution});
    TransformIndexer transform_indexer(intrinsics, extrinsics, voxel_size);

    // Real data indexer
    NDArrayIndexer depth_indexer(depth, 2);
    NDArrayIndexer block_keys_indexer(block_keys, 1);
    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);

    // Optional color integration: enabled only when a color image was given.
    NDArrayIndexer color_indexer;
    bool integrate_color = false;
    if (color.NumElements() != 0) {
        color_indexer = NDArrayIndexer(color, 2);
        integrate_color = true;
    }

    // Plain arrays that does not require indexers
    const int64_t* indices_ptr =
            static_cast<const int64_t*>(indices.GetDataPtr());

    // One work item per voxel of every active block.
    int64_t n = indices.GetLength() * resolution3;

#if defined(__CUDACC__)
    core::kernel::CUDALauncher launcher;
#else
    core::kernel::CPULauncher launcher;
#endif

    DISPATCH_BYTESIZE_TO_VOXEL(
            voxel_block_buffer_indexer.ElementByteSize(), [&]() {
                launcher.LaunchGeneralKernel(n, [=] OPEN3D_DEVICE(
                                                        int64_t workload_idx) {
                    // Natural index (0, N) -> (block_idx, voxel_idx)
                    int64_t block_idx =
                            indices_ptr[workload_idx / resolution3];
                    int64_t voxel_idx = workload_idx % resolution3;

                    /// Coordinate transform
                    // block_idx -> (x_block, y_block, z_block)
                    int* block_key_ptr =
                            block_keys_indexer.GetDataPtrFromCoord<int>(
                                    block_idx);
                    int64_t xb = static_cast<int64_t>(block_key_ptr[0]);
                    int64_t yb = static_cast<int64_t>(block_key_ptr[1]);
                    int64_t zb = static_cast<int64_t>(block_key_ptr[2]);

                    // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                    int64_t xv, yv, zv;
                    voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

                    // coordinate in world (in voxel)
                    int64_t x = (xb * resolution + xv);
                    int64_t y = (yb * resolution + yv);
                    int64_t z = (zb * resolution + zv);

                    // coordinate in camera (in voxel -> in meter)
                    float xc, yc, zc, u, v;
                    transform_indexer.RigidTransform(
                            static_cast<float>(x), static_cast<float>(y),
                            static_cast<float>(z), &xc, &yc, &zc);

                    // coordinate in image (in pixel)
                    transform_indexer.Project(xc, yc, zc, &u, &v);
                    if (!depth_indexer.InBoundary(u, v)) {
                        return;
                    }

                    // Associate image workload and compute SDF and TSDF.
                    float depth = *depth_indexer.GetDataPtrFromCoord<float>(
                                          static_cast<int64_t>(u),
                                          static_cast<int64_t>(v)) /
                                  depth_scale;

                    float sdf = (depth - zc);
                    // Skip invalid depth, points behind the camera, and
                    // voxels more than sdf_trunc behind the surface.
                    if (depth <= 0 || depth > depth_max || zc <= 0 ||
                        sdf < -sdf_trunc) {
                        return;
                    }
                    // Truncate and normalize to [-1, 1].
                    sdf = sdf < sdf_trunc ? sdf : sdf_trunc;
                    sdf /= sdf_trunc;

                    // Associate voxel workload and update TSDF/Weights
                    voxel_t* voxel_ptr =
                            voxel_block_buffer_indexer
                                    .GetDataPtrFromCoord<voxel_t>(xv, yv, zv,
                                                                  block_idx);

                    if (integrate_color) {
                        float* color_ptr =
                                color_indexer.GetDataPtrFromCoord<float>(
                                        static_cast<int64_t>(u),
                                        static_cast<int64_t>(v));

                        voxel_ptr->Integrate(sdf, color_ptr[0], color_ptr[1],
                                             color_ptr[2]);
                    } else {
                        voxel_ptr->Integrate(sdf);
                    }
                });
            });
#if defined(__CUDACC__)
    OPEN3D_CUDA_CHECK(cudaDeviceSynchronize());
#endif
}

// Extracts surface points (optionally with normals and colors) from the
// TSDF grid at zero crossings.  NOTE: the body of this function continues
// past this chunk; only its setup is visible here.
#if defined(__CUDACC__)
void ExtractSurfacePointsCUDA
#else
void ExtractSurfacePointsCPU
#endif
        (const core::Tensor& indices,
         const core::Tensor& nb_indices,
         const core::Tensor& nb_masks,
         const core::Tensor& block_keys,
         const core::Tensor& block_values,
         core::Tensor& points,
         utility::optional<std::reference_wrapper<core::Tensor>> normals,
         utility::optional<std::reference_wrapper<core::Tensor>> colors,
         int64_t resolution,
         float voxel_size,
         float weight_threshold,
         int& valid_size) {
    // Parameters
    int64_t resolution3 = resolution * resolution * resolution;

    // Shape / transform indexers, no data involved
    NDArrayIndexer voxel_indexer({resolution, resolution, resolution});

    // Real data indexer
    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);
    NDArrayIndexer block_keys_indexer(block_keys, 1);
    NDArrayIndexer nb_block_masks_indexer(nb_masks, 2);
    NDArrayIndexer nb_block_indices_indexer(nb_indices, 2);

    // Plain arrays that does not require indexers
    const int64_t* indices_ptr =
            static_cast<const int64_t*>(indices.GetDataPtr());

    int64_t n_blocks = indices.GetLength();
    int64_t n = n_blocks * resolution3;

    // Output point counter: a device tensor under CUDA, a std::atomic on CPU.
#if defined(__CUDACC__)
    core::Tensor count(std::vector<int>{0}, {1}, core::Dtype::Int32,
                       block_values.GetDevice());
    int* count_ptr = count.GetDataPtr<int>();
#else
    std::atomic<int> count_atomic(0);
    std::atomic<int>* count_ptr = &count_atomic;
#endif

#if defined(__CUDACC__)
    core::kernel::CUDALauncher launcher;
#else
    core::kernel::CPULauncher launcher;
#endif if (valid_size < 0) { utility::LogWarning( "No estimated max point cloud size provided, using a 2-pass " "estimation. Surface extraction could be slow."); // This pass determines valid number of points. DISPATCH_BYTESIZE_TO_VOXEL( voxel_block_buffer_indexer.ElementByteSize(), [&]() { launcher.LaunchGeneralKernel( n, [=] OPEN3D_DEVICE(int64_t workload_idx) { auto GetVoxelAt = [&] OPEN3D_DEVICE( int xo, int yo, int zo, int curr_block_idx) -> voxel_t* { return DeviceGetVoxelAt<voxel_t>( xo, yo, zo, curr_block_idx, static_cast<int>(resolution), nb_block_masks_indexer, nb_block_indices_indexer, voxel_block_buffer_indexer); }; // Natural index (0, N) -> (block_idx, // voxel_idx) int64_t workload_block_idx = workload_idx / resolution3; int64_t block_idx = indices_ptr[workload_block_idx]; int64_t voxel_idx = workload_idx % resolution3; // voxel_idx -> (x_voxel, y_voxel, z_voxel) int64_t xv, yv, zv; voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv); voxel_t* voxel_ptr = voxel_block_buffer_indexer .GetDataPtrFromCoord<voxel_t>( xv, yv, zv, block_idx); float tsdf_o = voxel_ptr->GetTSDF(); float weight_o = voxel_ptr->GetWeight(); if (weight_o <= weight_threshold) return; // Enumerate x-y-z directions for (int i = 0; i < 3; ++i) { voxel_t* ptr = GetVoxelAt( static_cast<int>(xv) + (i == 0), static_cast<int>(yv) + (i == 1), static_cast<int>(zv) + (i == 2), static_cast<int>( workload_block_idx)); if (ptr == nullptr) continue; float tsdf_i = ptr->GetTSDF(); float weight_i = ptr->GetWeight(); if (weight_i > weight_threshold && tsdf_i * tsdf_o < 0) { OPEN3D_ATOMIC_ADD(count_ptr, 1); } } }); }); #if defined(__CUDACC__) valid_size = count[0].Item<int>(); count[0] = 0; #else valid_size = (*count_ptr).load(); (*count_ptr) = 0; #endif } int max_count = valid_size; if (points.GetLength() == 0) { points = core::Tensor({max_count, 3}, core::Dtype::Float32, block_values.GetDevice()); } NDArrayIndexer point_indexer(points, 1); // Normals bool extract_normal = false; NDArrayIndexer 
normal_indexer; if (normals.has_value()) { extract_normal = true; if (normals.value().get().GetLength() == 0) { normals.value().get() = core::Tensor({max_count, 3}, core::Dtype::Float32, block_values.GetDevice()); } normal_indexer = NDArrayIndexer(normals.value().get(), 1); } // This pass extracts exact surface points. DISPATCH_BYTESIZE_TO_VOXEL( voxel_block_buffer_indexer.ElementByteSize(), [&]() { // Colors bool extract_color = false; NDArrayIndexer color_indexer; if (voxel_t::HasColor() && colors.has_value()) { extract_color = true; if (colors.value().get().GetLength() == 0) { colors.value().get() = core::Tensor( {max_count, 3}, core::Dtype::Float32, block_values.GetDevice()); } color_indexer = NDArrayIndexer(colors.value().get(), 1); } launcher.LaunchGeneralKernel(n, [=] OPEN3D_DEVICE( int64_t workload_idx) { auto GetVoxelAt = [&] OPEN3D_DEVICE( int xo, int yo, int zo, int curr_block_idx) -> voxel_t* { return DeviceGetVoxelAt<voxel_t>( xo, yo, zo, curr_block_idx, static_cast<int>(resolution), nb_block_masks_indexer, nb_block_indices_indexer, voxel_block_buffer_indexer); }; auto GetNormalAt = [&] OPEN3D_DEVICE(int xo, int yo, int zo, int curr_block_idx, float* n) { return DeviceGetNormalAt<voxel_t>( xo, yo, zo, curr_block_idx, n, static_cast<int>(resolution), voxel_size, nb_block_masks_indexer, nb_block_indices_indexer, voxel_block_buffer_indexer); }; // Natural index (0, N) -> (block_idx, voxel_idx) int64_t workload_block_idx = workload_idx / resolution3; int64_t block_idx = indices_ptr[workload_block_idx]; int64_t voxel_idx = workload_idx % resolution3; /// Coordinate transform // block_idx -> (x_block, y_block, z_block) int* block_key_ptr = block_keys_indexer.GetDataPtrFromCoord<int>( block_idx); int64_t xb = static_cast<int64_t>(block_key_ptr[0]); int64_t yb = static_cast<int64_t>(block_key_ptr[1]); int64_t zb = static_cast<int64_t>(block_key_ptr[2]); // voxel_idx -> (x_voxel, y_voxel, z_voxel) int64_t xv, yv, zv; voxel_indexer.WorkloadToCoord(voxel_idx, 
&xv, &yv, &zv); voxel_t* voxel_ptr = voxel_block_buffer_indexer .GetDataPtrFromCoord<voxel_t>( xv, yv, zv, block_idx); float tsdf_o = voxel_ptr->GetTSDF(); float weight_o = voxel_ptr->GetWeight(); if (weight_o <= weight_threshold) return; int64_t x = xb * resolution + xv; int64_t y = yb * resolution + yv; int64_t z = zb * resolution + zv; float no[3] = {0}, ni[3] = {0}; if (extract_normal) { GetNormalAt(static_cast<int>(xv), static_cast<int>(yv), static_cast<int>(zv), static_cast<int>(workload_block_idx), no); } // Enumerate x-y-z axis for (int i = 0; i < 3; ++i) { voxel_t* ptr = GetVoxelAt( static_cast<int>(xv) + (i == 0), static_cast<int>(yv) + (i == 1), static_cast<int>(zv) + (i == 2), static_cast<int>(workload_block_idx)); if (ptr == nullptr) continue; float tsdf_i = ptr->GetTSDF(); float weight_i = ptr->GetWeight(); if (weight_i > weight_threshold && tsdf_i * tsdf_o < 0) { float ratio = (0 - tsdf_o) / (tsdf_i - tsdf_o); int idx = OPEN3D_ATOMIC_ADD(count_ptr, 1); if (idx >= valid_size) { printf("Point cloud size larger than " "estimated, please increase the " "estimation!\n"); return; } float* point_ptr = point_indexer.GetDataPtrFromCoord<float>( idx); point_ptr[0] = voxel_size * (x + ratio * int(i == 0)); point_ptr[1] = voxel_size * (y + ratio * int(i == 1)); point_ptr[2] = voxel_size * (z + ratio * int(i == 2)); if (extract_color) { float* color_ptr = color_indexer .GetDataPtrFromCoord<float>( idx); float r_o = voxel_ptr->GetR(); float g_o = voxel_ptr->GetG(); float b_o = voxel_ptr->GetB(); float r_i = ptr->GetR(); float g_i = ptr->GetG(); float b_i = ptr->GetB(); color_ptr[0] = ((1 - ratio) * r_o + ratio * r_i) / 255.0f; color_ptr[1] = ((1 - ratio) * g_o + ratio * g_i) / 255.0f; color_ptr[2] = ((1 - ratio) * b_o + ratio * b_i) / 255.0f; } if (extract_normal) { GetNormalAt( static_cast<int>(xv) + (i == 0), static_cast<int>(yv) + (i == 1), static_cast<int>(zv) + (i == 2), static_cast<int>(workload_block_idx), ni); float* normal_ptr = normal_indexer 
.GetDataPtrFromCoord<float>( idx); float nx = (1 - ratio) * no[0] + ratio * ni[0]; float ny = (1 - ratio) * no[1] + ratio * ni[1]; float nz = (1 - ratio) * no[2] + ratio * ni[2]; float norm = static_cast<float>( sqrt(nx * nx + ny * ny + nz * nz) + 1e-5); normal_ptr[0] = nx / norm; normal_ptr[1] = ny / norm; normal_ptr[2] = nz / norm; } } } }); }); #if defined(__CUDACC__) int total_count = count.Item<int>(); #else int total_count = (*count_ptr).load(); #endif utility::LogDebug("{} vertices extracted", total_count); valid_size = total_count; #if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__) OPEN3D_CUDA_CHECK(cudaDeviceSynchronize()); #endif } #if defined(__CUDACC__) void ExtractSurfaceMeshCUDA #else void ExtractSurfaceMeshCPU #endif (const core::Tensor& indices, const core::Tensor& inv_indices, const core::Tensor& nb_indices, const core::Tensor& nb_masks, const core::Tensor& block_keys, const core::Tensor& block_values, core::Tensor& vertices, core::Tensor& triangles, core::Tensor& normals, core::Tensor& colors, int64_t resolution, float voxel_size, float weight_threshold) { int64_t resolution3 = resolution * resolution * resolution; // Shape / transform indexers, no data involved NDArrayIndexer voxel_indexer({resolution, resolution, resolution}); // Output #if defined(__CUDACC__) core::CUDACachedMemoryManager::ReleaseCache(); #endif int n_blocks = static_cast<int>(indices.GetLength()); // Voxel-wise mesh info. 4 channels correspond to: // 3 edges' corresponding vertex index + 1 table index. core::Tensor mesh_structure; try { mesh_structure = core::Tensor::Zeros( {n_blocks, resolution, resolution, resolution, 4}, core::Dtype::Int32, block_keys.GetDevice()); } catch (const std::runtime_error&) { utility::LogError( "[MeshExtractionKernel] Unable to allocate assistance mesh " "structure for Marching " "Cubes with {} active voxel blocks. 
Please consider using a " "larger voxel size (currently {}) for TSDF " "integration, or using tsdf_volume.cpu() to perform mesh " "extraction on CPU.", n_blocks, voxel_size); } // Real data indexer NDArrayIndexer voxel_block_buffer_indexer(block_values, 4); NDArrayIndexer mesh_structure_indexer(mesh_structure, 4); NDArrayIndexer nb_block_masks_indexer(nb_masks, 2); NDArrayIndexer nb_block_indices_indexer(nb_indices, 2); // Plain arrays that does not require indexers const int64_t* indices_ptr = indices.GetDataPtr<int64_t>(); const int64_t* inv_indices_ptr = inv_indices.GetDataPtr<int64_t>(); int64_t n = n_blocks * resolution3; #if defined(__CUDACC__) core::kernel::CUDALauncher launcher; #else core::kernel::CPULauncher launcher; #endif // Pass 0: analyze mesh structure, set up one-on-one correspondences from // edges to vertices. DISPATCH_BYTESIZE_TO_VOXEL( voxel_block_buffer_indexer.ElementByteSize(), [&]() { launcher.LaunchGeneralKernel(n, [=] OPEN3D_DEVICE( int64_t workload_idx) { auto GetVoxelAt = [&] OPEN3D_DEVICE( int xo, int yo, int zo, int curr_block_idx) -> voxel_t* { return DeviceGetVoxelAt<voxel_t>( xo, yo, zo, curr_block_idx, static_cast<int>(resolution), nb_block_masks_indexer, nb_block_indices_indexer, voxel_block_buffer_indexer); }; // Natural index (0, N) -> (block_idx, voxel_idx) int64_t workload_block_idx = workload_idx / resolution3; int64_t voxel_idx = workload_idx % resolution3; // voxel_idx -> (x_voxel, y_voxel, z_voxel) int64_t xv, yv, zv; voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv); // Check per-vertex sign in the cube to determine cube type int table_idx = 0; for (int i = 0; i < 8; ++i) { voxel_t* voxel_ptr_i = GetVoxelAt( static_cast<int>(xv) + vtx_shifts[i][0], static_cast<int>(yv) + vtx_shifts[i][1], static_cast<int>(zv) + vtx_shifts[i][2], static_cast<int>(workload_block_idx)); if (voxel_ptr_i == nullptr) return; float tsdf_i = voxel_ptr_i->GetTSDF(); float weight_i = voxel_ptr_i->GetWeight(); if (weight_i <= 
weight_threshold) return; table_idx |= ((tsdf_i < 0) ? (1 << i) : 0); } int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtrFromCoord<int>( xv, yv, zv, workload_block_idx); mesh_struct_ptr[3] = table_idx; if (table_idx == 0 || table_idx == 255) return; // Check per-edge sign in the cube to determine cube type int edges_with_vertices = edge_table[table_idx]; for (int i = 0; i < 12; ++i) { if (edges_with_vertices & (1 << i)) { int64_t xv_i = xv + edge_shifts[i][0]; int64_t yv_i = yv + edge_shifts[i][1]; int64_t zv_i = zv + edge_shifts[i][2]; int edge_i = edge_shifts[i][3]; int dxb = static_cast<int>(xv_i / resolution); int dyb = static_cast<int>(yv_i / resolution); int dzb = static_cast<int>(zv_i / resolution); int nb_idx = (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9; int64_t block_idx_i = *nb_block_indices_indexer .GetDataPtrFromCoord<int64_t>( workload_block_idx, nb_idx); int* mesh_ptr_i = mesh_structure_indexer.GetDataPtrFromCoord< int>(xv_i - dxb * resolution, yv_i - dyb * resolution, zv_i - dzb * resolution, inv_indices_ptr[block_idx_i]); // Non-atomic write, but we are safe mesh_ptr_i[edge_i] = -1; } } }); }); // Pass 1: determine valid number of vertices. 
#if defined(__CUDACC__) core::Tensor vtx_count(std::vector<int>{0}, {}, core::Dtype::Int32, block_values.GetDevice()); int* vtx_count_ptr = vtx_count.GetDataPtr<int>(); #else std::atomic<int> vtx_count_atomic(0); std::atomic<int>* vtx_count_ptr = &vtx_count_atomic; #endif #if defined(__CUDACC__) core::kernel::CUDALauncher::LaunchGeneralKernel( n, [=] OPEN3D_DEVICE(int64_t workload_idx) { #else core::kernel::CPULauncher::LaunchGeneralKernel( n, [&](int64_t workload_idx) { #endif // Natural index (0, N) -> (block_idx, voxel_idx) int64_t workload_block_idx = workload_idx / resolution3; int64_t voxel_idx = workload_idx % resolution3; // voxel_idx -> (x_voxel, y_voxel, z_voxel) int64_t xv, yv, zv; voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv); // Obtain voxel's mesh struct ptr int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtrFromCoord<int>( xv, yv, zv, workload_block_idx); // Early quit -- no allocated vertex to compute if (mesh_struct_ptr[0] != -1 && mesh_struct_ptr[1] != -1 && mesh_struct_ptr[2] != -1) { return; } // Enumerate 3 edges in the voxel for (int e = 0; e < 3; ++e) { int vertex_idx = mesh_struct_ptr[e]; if (vertex_idx != -1) continue; OPEN3D_ATOMIC_ADD(vtx_count_ptr, 1); } }); // Reset count_ptr #if defined(__CUDACC__) int total_vtx_count = vtx_count.Item<int>(); vtx_count = core::Tensor(std::vector<int>{0}, {}, core::Dtype::Int32, block_values.GetDevice()); vtx_count_ptr = vtx_count.GetDataPtr<int>(); #else int total_vtx_count = (*vtx_count_ptr).load(); (*vtx_count_ptr) = 0; #endif utility::LogDebug("Total vertex count = {}", total_vtx_count); vertices = core::Tensor({total_vtx_count, 3}, core::Dtype::Float32, block_values.GetDevice()); normals = core::Tensor({total_vtx_count, 3}, core::Dtype::Float32, block_values.GetDevice()); NDArrayIndexer block_keys_indexer(block_keys, 1); NDArrayIndexer vertex_indexer(vertices, 1); NDArrayIndexer normal_indexer(normals, 1); // Pass 2: extract vertices. 
DISPATCH_BYTESIZE_TO_VOXEL( voxel_block_buffer_indexer.ElementByteSize(), [&]() { bool extract_color = false; NDArrayIndexer color_indexer; if (voxel_t::HasColor()) { extract_color = true; colors = core::Tensor({total_vtx_count, 3}, core::Dtype::Float32, block_values.GetDevice()); color_indexer = NDArrayIndexer(colors, 1); } launcher.LaunchGeneralKernel(n, [=] OPEN3D_DEVICE( int64_t workload_idx) { auto GetVoxelAt = [&] OPEN3D_DEVICE( int xo, int yo, int zo, int curr_block_idx) -> voxel_t* { return DeviceGetVoxelAt<voxel_t>( xo, yo, zo, curr_block_idx, static_cast<int>(resolution), nb_block_masks_indexer, nb_block_indices_indexer, voxel_block_buffer_indexer); }; auto GetNormalAt = [&] OPEN3D_DEVICE(int xo, int yo, int zo, int curr_block_idx, float* n) { return DeviceGetNormalAt<voxel_t>( xo, yo, zo, curr_block_idx, n, static_cast<int>(resolution), voxel_size, nb_block_masks_indexer, nb_block_indices_indexer, voxel_block_buffer_indexer); }; // Natural index (0, N) -> (block_idx, voxel_idx) int64_t workload_block_idx = workload_idx / resolution3; int64_t block_idx = indices_ptr[workload_block_idx]; int64_t voxel_idx = workload_idx % resolution3; // block_idx -> (x_block, y_block, z_block) int* block_key_ptr = block_keys_indexer.GetDataPtrFromCoord<int>( block_idx); int64_t xb = static_cast<int64_t>(block_key_ptr[0]); int64_t yb = static_cast<int64_t>(block_key_ptr[1]); int64_t zb = static_cast<int64_t>(block_key_ptr[2]); // voxel_idx -> (x_voxel, y_voxel, z_voxel) int64_t xv, yv, zv; voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv); // global coordinate (in voxels) int64_t x = xb * resolution + xv; int64_t y = yb * resolution + yv; int64_t z = zb * resolution + zv; // Obtain voxel's mesh struct ptr int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtrFromCoord<int>( xv, yv, zv, workload_block_idx); // Early quit -- no allocated vertex to compute if (mesh_struct_ptr[0] != -1 && mesh_struct_ptr[1] != -1 && mesh_struct_ptr[2] != -1) { return; } // Obtain voxel 
ptr voxel_t* voxel_ptr = voxel_block_buffer_indexer .GetDataPtrFromCoord<voxel_t>( xv, yv, zv, block_idx); float tsdf_o = voxel_ptr->GetTSDF(); float no[3] = {0}, ne[3] = {0}; GetNormalAt(static_cast<int>(xv), static_cast<int>(yv), static_cast<int>(zv), static_cast<int>(workload_block_idx), no); // Enumerate 3 edges in the voxel for (int e = 0; e < 3; ++e) { int vertex_idx = mesh_struct_ptr[e]; if (vertex_idx != -1) continue; voxel_t* voxel_ptr_e = GetVoxelAt( static_cast<int>(xv) + (e == 0), static_cast<int>(yv) + (e == 1), static_cast<int>(zv) + (e == 2), static_cast<int>(workload_block_idx)); float tsdf_e = voxel_ptr_e->GetTSDF(); float ratio = (0 - tsdf_o) / (tsdf_e - tsdf_o); int idx = OPEN3D_ATOMIC_ADD(vtx_count_ptr, 1); mesh_struct_ptr[e] = idx; float ratio_x = ratio * int(e == 0); float ratio_y = ratio * int(e == 1); float ratio_z = ratio * int(e == 2); float* vertex_ptr = vertex_indexer.GetDataPtrFromCoord<float>(idx); vertex_ptr[0] = voxel_size * (x + ratio_x); vertex_ptr[1] = voxel_size * (y + ratio_y); vertex_ptr[2] = voxel_size * (z + ratio_z); float* normal_ptr = normal_indexer.GetDataPtrFromCoord<float>(idx); GetNormalAt(static_cast<int>(xv) + (e == 0), static_cast<int>(yv) + (e == 1), static_cast<int>(zv) + (e == 2), static_cast<int>(workload_block_idx), ne); float nx = (1 - ratio) * no[0] + ratio * ne[0]; float ny = (1 - ratio) * no[1] + ratio * ne[1]; float nz = (1 - ratio) * no[2] + ratio * ne[2]; float norm = static_cast<float>( sqrt(nx * nx + ny * ny + nz * nz) + 1e-5); normal_ptr[0] = nx / norm; normal_ptr[1] = ny / norm; normal_ptr[2] = nz / norm; if (extract_color) { float* color_ptr = color_indexer.GetDataPtrFromCoord<float>( idx); float r_o = voxel_ptr->GetR(); float g_o = voxel_ptr->GetG(); float b_o = voxel_ptr->GetB(); float r_e = voxel_ptr_e->GetR(); float g_e = voxel_ptr_e->GetG(); float b_e = voxel_ptr_e->GetB(); color_ptr[0] = ((1 - ratio) * r_o + ratio * r_e) / 255.0f; color_ptr[1] = ((1 - ratio) * g_o + ratio * g_e) / 255.0f; 
color_ptr[2] = ((1 - ratio) * b_o + ratio * b_e) / 255.0f; } } }); }); // Pass 3: connect vertices and form triangles. #if defined(__CUDACC__) core::Tensor triangle_count(std::vector<int>{0}, {}, core::Dtype::Int32, block_values.GetDevice()); int* tri_count_ptr = triangle_count.GetDataPtr<int>(); #else std::atomic<int> tri_count_atomic(0); std::atomic<int>* tri_count_ptr = &tri_count_atomic; #endif triangles = core::Tensor({total_vtx_count * 3, 3}, core::Dtype::Int64, block_values.GetDevice()); NDArrayIndexer triangle_indexer(triangles, 1); #if defined(__CUDACC__) core::kernel::CUDALauncher::LaunchGeneralKernel( n, [=] OPEN3D_DEVICE(int64_t workload_idx) { #else core::kernel::CPULauncher::LaunchGeneralKernel( n, [&](int64_t workload_idx) { #endif // Natural index (0, N) -> (block_idx, // voxel_idx) int64_t workload_block_idx = workload_idx / resolution3; int64_t voxel_idx = workload_idx % resolution3; // voxel_idx -> (x_voxel, y_voxel, z_voxel) int64_t xv, yv, zv; voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv); // Obtain voxel's mesh struct ptr int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtrFromCoord<int>( xv, yv, zv, workload_block_idx); int table_idx = mesh_struct_ptr[3]; if (tri_count[table_idx] == 0) return; for (size_t tri = 0; tri < 16; tri += 3) { if (tri_table[table_idx][tri] == -1) return; int tri_idx = OPEN3D_ATOMIC_ADD(tri_count_ptr, 1); for (size_t vertex = 0; vertex < 3; ++vertex) { int edge = tri_table[table_idx][tri + vertex]; int64_t xv_i = xv + edge_shifts[edge][0]; int64_t yv_i = yv + edge_shifts[edge][1]; int64_t zv_i = zv + edge_shifts[edge][2]; int64_t edge_i = edge_shifts[edge][3]; int dxb = static_cast<int>(xv_i / resolution); int dyb = static_cast<int>(yv_i / resolution); int dzb = static_cast<int>(zv_i / resolution); int nb_idx = (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9; int64_t block_idx_i = *nb_block_indices_indexer .GetDataPtrFromCoord<int64_t>( workload_block_idx, nb_idx); int* mesh_struct_ptr_i = 
mesh_structure_indexer.GetDataPtrFromCoord<int>( xv_i - dxb * resolution, yv_i - dyb * resolution, zv_i - dzb * resolution, inv_indices_ptr[block_idx_i]); int64_t* triangle_ptr = triangle_indexer.GetDataPtrFromCoord<int64_t>( tri_idx); triangle_ptr[2 - vertex] = mesh_struct_ptr_i[edge_i]; } } }); #if defined(__CUDACC__) int total_tri_count = triangle_count.Item<int>(); #else int total_tri_count = (*tri_count_ptr).load(); #endif utility::LogDebug("Total triangle count = {}", total_tri_count); triangles = triangles.Slice(0, 0, total_tri_count); } #if defined(__CUDACC__) void EstimateRangeCUDA #else void EstimateRangeCPU #endif (const core::Tensor& block_keys, core::Tensor& range_minmax_map, const core::Tensor& intrinsics, const core::Tensor& extrinsics, int h, int w, int down_factor, int64_t block_resolution, float voxel_size, float depth_min, float depth_max) { // TODO(wei): reserve it in a reusable buffer // Every 2 channels: (min, max) int h_down = h / down_factor; int w_down = w / down_factor; range_minmax_map = core::Tensor({h_down, w_down, 2}, core::Dtype::Float32, block_keys.GetDevice()); NDArrayIndexer range_map_indexer(range_minmax_map, 2); // Every 6 channels: (v_min, u_min, v_max, u_max, z_min, z_max) const int fragment_size = 16; const int frag_buffer_size = 65535; // TODO(wei): explicit buffer core::Tensor fragment_buffer = core::Tensor({frag_buffer_size, 6}, core::Dtype::Float32, block_keys.GetDevice()); NDArrayIndexer frag_buffer_indexer(fragment_buffer, 1); NDArrayIndexer block_keys_indexer(block_keys, 1); TransformIndexer w2c_transform_indexer(intrinsics, extrinsics); #if defined(__CUDACC__) core::Tensor count(std::vector<int>{0}, {1}, core::Dtype::Int32, block_keys.GetDevice()); int* count_ptr = count.GetDataPtr<int>(); #else std::atomic<int> count_atomic(0); std::atomic<int>* count_ptr = &count_atomic; #endif #if defined(__CUDACC__) core::kernel::CUDALauncher launcher; #else core::kernel::CPULauncher launcher; using std::ceil; using std::floor; 
using std::max; using std::min; #endif // Pass 0: iterate over blocks, fill-in an rendering fragment array launcher.LaunchGeneralKernel( block_keys.GetLength(), [=] OPEN3D_DEVICE(int64_t workload_idx) { int* key = block_keys_indexer.GetDataPtrFromCoord<int>( workload_idx); int u_min = w_down - 1, v_min = h_down - 1, u_max = 0, v_max = 0; float z_min = depth_max, z_max = depth_min; float xc, yc, zc, u, v; // Project 8 corners to low-res image and form a rectangle for (int i = 0; i < 8; ++i) { float xw = (key[0] + ((i & 1) > 0)) * block_resolution * voxel_size; float yw = (key[1] + ((i & 2) > 0)) * block_resolution * voxel_size; float zw = (key[2] + ((i & 4) > 0)) * block_resolution * voxel_size; w2c_transform_indexer.RigidTransform(xw, yw, zw, &xc, &yc, &zc); if (zc <= 0) continue; // Project to the down sampled image buffer w2c_transform_indexer.Project(xc, yc, zc, &u, &v); u /= down_factor; v /= down_factor; v_min = min(static_cast<int>(floor(v)), v_min); v_max = max(static_cast<int>(ceil(v)), v_max); u_min = min(static_cast<int>(floor(u)), u_min); u_max = max(static_cast<int>(ceil(u)), u_max); z_min = min(z_min, zc); z_max = max(z_max, zc); } v_min = max(0, v_min); v_max = min(h_down - 1, v_max); u_min = max(0, u_min); u_max = min(w_down - 1, u_max); if (v_min >= v_max || u_min >= u_max || z_min >= z_max) return; // Divide the rectangle into small 16x16 fragments int frag_v_count = ceil(float(v_max - v_min + 1) / float(fragment_size)); int frag_u_count = ceil(float(u_max - u_min + 1) / float(fragment_size)); int frag_count = frag_v_count * frag_u_count; int frag_count_start = OPEN3D_ATOMIC_ADD(count_ptr, 1); int frag_count_end = frag_count_start + frag_count; if (frag_count_end >= frag_buffer_size) { printf("Fragment count exceeding buffer size, abort!\n"); } int offset = 0; for (int frag_v = 0; frag_v < frag_v_count; ++frag_v) { for (int frag_u = 0; frag_u < frag_u_count; ++frag_u, ++offset) { float* frag_ptr = frag_buffer_indexer.GetDataPtrFromCoord<float>( 
frag_count_start + offset); // zmin, zmax frag_ptr[0] = z_min; frag_ptr[1] = z_max; // vmin, umin frag_ptr[2] = v_min + frag_v * fragment_size; frag_ptr[3] = u_min + frag_u * fragment_size; // vmax, umax frag_ptr[4] = min(frag_ptr[2] + fragment_size - 1, static_cast<float>(v_max)); frag_ptr[5] = min(frag_ptr[3] + fragment_size - 1, static_cast<float>(u_max)); } } }); #if defined(__CUDACC__) int frag_count = count[0].Item<int>(); #else int frag_count = (*count_ptr).load(); #endif // Pass 0.5: Fill in range map to prepare for atomic min/max launcher.LaunchGeneralKernel( h_down * w_down, [=] OPEN3D_DEVICE(int64_t workload_idx) { int v = workload_idx / w_down; int u = workload_idx % w_down; float* range_ptr = range_map_indexer.GetDataPtrFromCoord<float>(u, v); range_ptr[0] = depth_max; range_ptr[1] = depth_min; }); // Pass 1: iterate over rendering fragment array, fill-in range launcher.LaunchGeneralKernel( frag_count * fragment_size * fragment_size, [=] OPEN3D_DEVICE(int64_t workload_idx) { int frag_idx = workload_idx / (fragment_size * fragment_size); int local_idx = workload_idx % (fragment_size * fragment_size); int dv = local_idx / fragment_size; int du = local_idx % fragment_size; float* frag_ptr = frag_buffer_indexer.GetDataPtrFromCoord<float>( frag_idx); int v_min = static_cast<int>(frag_ptr[2]); int u_min = static_cast<int>(frag_ptr[3]); int v_max = static_cast<int>(frag_ptr[4]); int u_max = static_cast<int>(frag_ptr[5]); int v = v_min + dv; int u = u_min + du; if (v > v_max || u > u_max) return; float z_min = frag_ptr[0]; float z_max = frag_ptr[1]; float* range_ptr = range_map_indexer.GetDataPtrFromCoord<float>(u, v); #ifdef __CUDACC__ atomicMinf(&(range_ptr[0]), z_min); atomicMaxf(&(range_ptr[1]), z_max); #else #pragma omp critical { range_ptr[0] = min(z_min, range_ptr[0]); range_ptr[1] = max(z_max, range_ptr[1]); } #endif }); #if defined(__CUDACC__) OPEN3D_CUDA_CHECK(cudaDeviceSynchronize()); #endif } struct BlockCache { int x; int y; int z; int block_idx; 
inline int OPEN3D_DEVICE Check(int xin, int yin, int zin) { return (xin == x && yin == y && zin == z) ? block_idx : -1; } inline void OPEN3D_DEVICE Update(int xin, int yin, int zin, int block_idx_in) { x = xin; y = yin; z = zin; block_idx = block_idx_in; } }; #if defined(__CUDACC__) void RayCastCUDA #else void RayCastCPU #endif (std::shared_ptr<core::DeviceHashmap>& hashmap, const core::Tensor& block_values, const core::Tensor& range_map, core::Tensor& vertex_map, core::Tensor& depth_map, core::Tensor& color_map, core::Tensor& normal_map, const core::Tensor& intrinsics, const core::Tensor& extrinsics, int h, int w, int64_t block_resolution, float voxel_size, float sdf_trunc, float depth_scale, float depth_min, float depth_max, float weight_threshold) { using Key = core::Block<int, 3>; using Hash = core::BlockHash<int, 3>; #if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__) auto cuda_hashmap = std::dynamic_pointer_cast<core::StdGPUHashmap<Key, Hash>>(hashmap); if (cuda_hashmap == nullptr) { utility::LogError( "Unsupported backend: CUDA raycasting only supports STDGPU."); } auto hashmap_impl = cuda_hashmap->GetImpl(); #else auto cpu_hashmap = std::dynamic_pointer_cast<core::TBBHashmap<Key, Hash>>(hashmap); auto hashmap_impl = *cpu_hashmap->GetImpl(); #endif NDArrayIndexer voxel_block_buffer_indexer(block_values, 4); NDArrayIndexer range_map_indexer(range_map, 2); NDArrayIndexer vertex_map_indexer; NDArrayIndexer depth_map_indexer; NDArrayIndexer color_map_indexer; NDArrayIndexer normal_map_indexer; bool enable_vertex = (vertex_map.GetLength() != 0); bool enable_depth = (depth_map.GetLength() != 0); bool enable_color = (color_map.GetLength() != 0); bool enable_normal = (normal_map.GetLength() != 0); if (!enable_vertex && !enable_depth && !enable_color && !enable_normal) { utility::LogWarning("No output specified for ray casting, exit."); return; } if (enable_vertex) { vertex_map_indexer = NDArrayIndexer(vertex_map, 2); } if (enable_depth) { depth_map_indexer = 
NDArrayIndexer(depth_map, 2); } if (enable_color) { color_map_indexer = NDArrayIndexer(color_map, 2); } if (enable_normal) { normal_map_indexer = NDArrayIndexer(normal_map, 2); } TransformIndexer c2w_transform_indexer( intrinsics, t::geometry::InverseTransformation(extrinsics)); TransformIndexer w2c_transform_indexer(intrinsics, extrinsics); int64_t rows = h; int64_t cols = w; float block_size = voxel_size * block_resolution; #if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__) core::kernel::CUDALauncher launcher; #else core::kernel::CPULauncher launcher; using std::max; #endif DISPATCH_BYTESIZE_TO_VOXEL(voxel_block_buffer_indexer.ElementByteSize(), [&]() { launcher.LaunchGeneralKernel( rows * cols, [=] OPEN3D_DEVICE(int64_t workload_idx) { auto GetVoxelAtP = [&] OPEN3D_DEVICE( int x_b, int y_b, int z_b, int x_v, int y_v, int z_v, core::addr_t block_addr, BlockCache& cache) -> voxel_t* { int x_vn = (x_v + block_resolution) % block_resolution; int y_vn = (y_v + block_resolution) % block_resolution; int z_vn = (z_v + block_resolution) % block_resolution; int dx_b = Sign(x_v - x_vn); int dy_b = Sign(y_v - y_vn); int dz_b = Sign(z_v - z_vn); if (dx_b == 0 && dy_b == 0 && dz_b == 0) { return voxel_block_buffer_indexer .GetDataPtrFromCoord<voxel_t>(x_v, y_v, z_v, block_addr); } else { Key key; key(0) = x_b + dx_b; key(1) = y_b + dy_b; key(2) = z_b + dz_b; int block_addr = cache.Check(key(0), key(1), key(2)); if (block_addr < 0) { auto iter = hashmap_impl.find(key); if (iter == hashmap_impl.end()) return nullptr; block_addr = iter->second; cache.Update(key(0), key(1), key(2), block_addr); } return voxel_block_buffer_indexer .GetDataPtrFromCoord<voxel_t>( x_vn, y_vn, z_vn, block_addr); } }; auto GetVoxelAtT = [&] OPEN3D_DEVICE( float x_o, float y_o, float z_o, float x_d, float y_d, float z_d, float t, BlockCache& cache) -> voxel_t* { float x_g = x_o + t * x_d; float y_g = y_o + t * y_d; float z_g = z_o + t * z_d; // Block coordinate and look up int x_b = 
static_cast<int>(floor(x_g / block_size)); int y_b = static_cast<int>(floor(y_g / block_size)); int z_b = static_cast<int>(floor(z_g / block_size)); Key key; key(0) = x_b; key(1) = y_b; key(2) = z_b; int block_addr = cache.Check(x_b, y_b, z_b); if (block_addr < 0) { auto iter = hashmap_impl.find(key); if (iter == hashmap_impl.end()) return nullptr; block_addr = iter->second; cache.Update(x_b, y_b, z_b, block_addr); } // Voxel coordinate and look up int x_v = int((x_g - x_b * block_size) / voxel_size); int y_v = int((y_g - y_b * block_size) / voxel_size); int z_v = int((z_g - z_b * block_size) / voxel_size); return voxel_block_buffer_indexer .GetDataPtrFromCoord<voxel_t>(x_v, y_v, z_v, block_addr); }; int64_t y = workload_idx / cols; int64_t x = workload_idx % cols; float *depth_ptr = nullptr, *vertex_ptr = nullptr, *normal_ptr = nullptr, *color_ptr = nullptr; if (enable_depth) { depth_ptr = depth_map_indexer.GetDataPtrFromCoord<float>(x, y); *depth_ptr = 0; } if (enable_vertex) { vertex_ptr = vertex_map_indexer.GetDataPtrFromCoord<float>( x, y); vertex_ptr[0] = 0; vertex_ptr[1] = 0; vertex_ptr[2] = 0; } if (enable_color) { color_ptr = color_map_indexer.GetDataPtrFromCoord<float>(x, y); color_ptr[0] = 0; color_ptr[1] = 0; color_ptr[2] = 0; } if (enable_normal) { normal_ptr = normal_map_indexer.GetDataPtrFromCoord<float>( x, y); normal_ptr[0] = 0; normal_ptr[1] = 0; normal_ptr[2] = 0; } const float* range = range_map_indexer.GetDataPtrFromCoord<float>(x / 8, y / 8); float t = range[0]; const float t_max = range[1]; if (t >= t_max) return; // Coordinates in camera and global float x_c = 0, y_c = 0, z_c = 0; float x_g = 0, y_g = 0, z_g = 0; float x_o = 0, y_o = 0, z_o = 0; // Iterative ray intersection check float t_prev = t; float tsdf_prev = -1.0f; float tsdf = 1.0; float w = 0.0; // Camera origin c2w_transform_indexer.RigidTransform(0, 0, 0, &x_o, &y_o, &z_o); // Direction c2w_transform_indexer.Unproject(static_cast<float>(x), static_cast<float>(y), 1.0f, &x_c, 
&y_c, &z_c); c2w_transform_indexer.RigidTransform(x_c, y_c, z_c, &x_g, &y_g, &z_g); float x_d = (x_g - x_o); float y_d = (y_g - y_o); float z_d = (z_g - z_o); BlockCache cache{0, 0, 0, -1}; bool surface_found = false; while (t < t_max) { voxel_t* voxel_ptr = GetVoxelAtT(x_o, y_o, z_o, x_d, y_d, z_d, t, cache); if (!voxel_ptr) { t_prev = t; t += block_size; } else { tsdf_prev = tsdf; tsdf = voxel_ptr->GetTSDF(); w = voxel_ptr->GetWeight(); if (tsdf_prev > 0 && w >= weight_threshold && tsdf <= 0) { surface_found = true; break; } t_prev = t; float delta = tsdf * sdf_trunc; t += delta < voxel_size ? voxel_size : delta; } } if (surface_found) { float t_intersect = (t * tsdf_prev - t_prev * tsdf) / (tsdf_prev - tsdf); x_g = x_o + t_intersect * x_d; y_g = y_o + t_intersect * y_d; z_g = z_o + t_intersect * z_d; // Trivial vertex assignment if (enable_depth) { *depth_ptr = t_intersect * depth_scale; } if (enable_vertex) { w2c_transform_indexer.RigidTransform( x_g, y_g, z_g, vertex_ptr + 0, vertex_ptr + 1, vertex_ptr + 2); } // Trilinear interpolation // TODO(wei): simplify the flow by splitting the // functions given what is enabled if (enable_color || enable_normal) { int x_b = static_cast<int>(floor(x_g / block_size)); int y_b = static_cast<int>(floor(y_g / block_size)); int z_b = static_cast<int>(floor(z_g / block_size)); float x_v = (x_g - float(x_b) * block_size) / voxel_size; float y_v = (y_g - float(y_b) * block_size) / voxel_size; float z_v = (z_g - float(z_b) * block_size) / voxel_size; Key key; key(0) = x_b; key(1) = y_b; key(2) = z_b; int block_addr = cache.Check(x_b, y_b, z_b); if (block_addr < 0) { auto iter = hashmap_impl.find(key); if (iter == hashmap_impl.end()) return; block_addr = iter->second; cache.Update(x_b, y_b, z_b, block_addr); } int x_v_floor = static_cast<int>(floor(x_v)); int y_v_floor = static_cast<int>(floor(y_v)); int z_v_floor = static_cast<int>(floor(z_v)); float ratio_x = x_v - float(x_v_floor); float ratio_y = y_v - float(y_v_floor); float 
ratio_z = z_v - float(z_v_floor); float sum_weight_color = 0.0; float sum_weight_normal = 0.0; for (int k = 0; k < 8; ++k) { int dx_v = (k & 1) > 0 ? 1 : 0; int dy_v = (k & 2) > 0 ? 1 : 0; int dz_v = (k & 4) > 0 ? 1 : 0; float ratio = (dx_v * (ratio_x) + (1 - dx_v) * (1 - ratio_x)) * (dy_v * (ratio_y) + (1 - dy_v) * (1 - ratio_y)) * (dz_v * (ratio_z) + (1 - dz_v) * (1 - ratio_z)); voxel_t* voxel_ptr_k = GetVoxelAtP( x_b, y_b, z_b, x_v_floor + dx_v, y_v_floor + dy_v, z_v_floor + dz_v, block_addr, cache); if (enable_color && voxel_ptr_k && voxel_ptr_k->GetWeight() > 0) { sum_weight_color += ratio; color_ptr[0] += ratio * voxel_ptr_k->GetR(); color_ptr[1] += ratio * voxel_ptr_k->GetG(); color_ptr[2] += ratio * voxel_ptr_k->GetB(); } if (enable_normal) { for (int dim = 0; dim < 3; ++dim) { voxel_t* voxel_ptr_k_plus = GetVoxelAtP( x_b, y_b, z_b, x_v_floor + dx_v + (dim == 0), y_v_floor + dy_v + (dim == 1), z_v_floor + dz_v + (dim == 2), block_addr, cache); voxel_t* voxel_ptr_k_minus = GetVoxelAtP(x_b, y_b, z_b, x_v_floor + dx_v - (dim == 0), y_v_floor + dy_v - (dim == 1), z_v_floor + dz_v - (dim == 2), block_addr, cache); bool valid = false; if (voxel_ptr_k_plus && voxel_ptr_k_plus->GetWeight() > 0) { normal_ptr[dim] += ratio * voxel_ptr_k_plus ->GetTSDF() / (2 * voxel_size); valid = true; } if (voxel_ptr_k_minus && voxel_ptr_k_minus->GetWeight() > 0) { normal_ptr[dim] -= ratio * voxel_ptr_k_minus ->GetTSDF() / (2 * voxel_size); valid = true; } sum_weight_normal += valid ? 
ratio : 0; } } // if (enable_normal) } // loop over 8 neighbors if (enable_color && sum_weight_color > 0) { sum_weight_color *= 255.0; color_ptr[0] /= sum_weight_color; color_ptr[1] /= sum_weight_color; color_ptr[2] /= sum_weight_color; } if (enable_normal && sum_weight_normal > 0) { normal_ptr[0] /= sum_weight_normal; normal_ptr[1] /= sum_weight_normal; normal_ptr[2] /= sum_weight_normal; float norm = sqrt(normal_ptr[0] * normal_ptr[0] + normal_ptr[1] * normal_ptr[1] + normal_ptr[2] * normal_ptr[2]); w2c_transform_indexer.Rotate( normal_ptr[0] / norm, normal_ptr[1] / norm, normal_ptr[2] / norm, normal_ptr + 0, normal_ptr + 1, normal_ptr + 2); } } // if (color or normal) } // if (tsdf < 0) }); }); #if defined(__CUDACC__) OPEN3D_CUDA_CHECK(cudaDeviceSynchronize()); #endif } } // namespace tsdf } // namespace kernel } // namespace geometry } // namespace t } // namespace open3d
GB_unop__identity_uint16_int64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// This translation unit provides the IDENTITY unary operator specialized for
// uint16_t output and int64_t input: C = (uint16_t) A, element-wise.  The
// macros below form the contract consumed by the shared transpose template
// "GB_unop_transpose.c" included at the bottom — do not change their token
// streams independently of the Generator sources.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_uint16_int64)
// op(A') function:  GB (_unop_tran__identity_uint16_int64)

// C type:   uint16_t
// A type:   int64_t
// cast:     uint16_t cij = (uint16_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint16_t z = (uint16_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint16_t z = (uint16_t) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply the typecast to all anz entries of Ax, writing into Cx.  When Ab is
// non-NULL, A is in bitmap form and only entries with Ab [p] != 0 are live;
// the loop skips the dead ones.  Parallelized over entries with OpenMP.
GrB_Info GB (_unop_apply__identity_uint16_int64)
(
    uint16_t *Cx,               // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every position 0..anz-1 holds a live entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int64_t aij = Ax [p] ;
            uint16_t z = (uint16_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int64_t aij = Ax [p] ;
            uint16_t z = (uint16_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The entire transpose-and-apply algorithm lives in the shared template
// "GB_unop_transpose.c", instantiated via the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_uint16_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
dense.c
// // dense.c // // Created by Hussian Alamri on September 2012 // #include "dense.h" /* Acquired from Matrix Market http://math.nist.gov/MatrixMarket/ with few modifications */ MATRIX* ReadMatrix(FILE *file) { MM_typecode matcode; int M, N, nz, ret_code, i, j, k; double *val; double **A; MATRIX *m; if (mm_read_banner(file, &matcode) != 0) { printf("Could not process Matrix Market banner.\n"); exit(1); } /* This is how one can screen matrix types if their application */ /* only supports a subset of the Matrix Market data types. */ if (mm_is_complex(matcode) && mm_is_matrix(matcode) && mm_is_sparse(matcode) ) { printf("Sorry, this application does not support "); printf("Matrix Market type: [%s]\n", mm_typecode_to_str(matcode)); exit(1); } /* find out size of sparse matrix .... */ if ((ret_code = mm_read_mtx_crd_size(file, &M, &N, &nz)) !=0) exit(1); /* reseve memory for matrices */ m = (MATRIX *) malloc(sizeof(MATRIX)); int* I = (int *) malloc(nz * sizeof(int)); int* J = (int *) malloc(nz * sizeof(int)); val = (double *) malloc(nz * sizeof(double)); /* NOTE: when reading in doubles, ANSI C requires the use of the "l" */ /* specifier as in "%lg", "%lf", "%le", otherwise errors will occur */ /* (ANSI C X3.159-1989, Sec. 4.9.6.2, p. 
136 lines 13-15) */ for (i=0; i<nz; i++) { fscanf(file, "%d %d %lf\n", &I[i], &J[i], &val[i]); I[i]--; /* adjust from 1-based to 0-based */ J[i]--; } if (file !=stdin) fclose(file); A = (double**) malloc(M * sizeof(double*)); for (i = 0; i < M; ++i) { A[i] = (double*) malloc(N * sizeof(double)); } for(j=0; j < M; ++j) { for(k=0; k < N; ++k) { A[j][k] = 0.0; } } for(i=0; i < nz; ++i) { j = I[i]; k = J[i]; A[j][k] = val[i]; } m->nnz = nz; m->nrows = M; m->ncols = N; m->mel = A; return m; } void WriteMatrix(FILE *file, MATRIX* reM) { MM_typecode matcode; int i, jjj, k; int nz = reM->nnz; int* I = (int *) malloc(nz * sizeof(int)); int* J = (int *) malloc(nz * sizeof(int)); double* val = (double *) malloc(nz * sizeof(double)); int nrows = reM->nrows; int ncols = reM->ncols; int N = nrows; int M = ncols; k = 0; while (k < nz) { for (i = 0; i < nrows; ++i) { for (jjj = 0; jjj < ncols; ++jjj) { if((double)reM->mel[i][jjj] != (double)0) { I[k] = i; J[k] = jjj; val[k] = reM->mel[i][jjj]; k++; } } } break; } // end while mm_initialize_typecode(&matcode); mm_set_matrix(&matcode); mm_set_coordinate(&matcode); mm_set_real(&matcode); mm_write_banner(file, matcode); mm_write_mtx_crd_size(file, M, N, nz); /* NOTE: matrix market files use 1-based indices, i.e. first element of a vector has index 1, not 0. 
*/ for (i=0; i<nz; i++) fprintf(file, "%d %d %10.3g\n", I[i]+1, J[i]+1, val[i]); fflush(file); #ifdef LOG printf("Closing file...\n"); #endif fclose(file); free(I); free(J); free(val); } double DotProduct(const double *a, const double *b, int n) { double result = 0.0f; int i; #pragma omp parallel for default(shared) private(i) reduction(+:result) for(i = 0; i < n; i++) { result += (a[i] * b[i]); } return result; } double* MultiplyMatrix(MATRIX *m, double *val) { int i, j, s, s1; int nrows = m->nrows; int ncols = m->ncols; double* r = malloc(ncols * sizeof(double)); double** mal = m->mel; double result; #pragma omp parallel for private(s) for (s=0; s < nrows; s++) { r[s] = 0; } #pragma omp parallel for default(shared) private(i, j) reduction(+:result) for (i=0; i< nrows; ++i) { for (j=0; j < ncols; ++j) { result += (double) (mal[i][j] * val[j]); } r[i] = result; } return r; } void PrintMatrix(MATRIX *m) { int i, j; for (i = 0; i < m->nrows; i++) { for (j = 0; j < m->ncols; j++) { printf("%f ", m->mel[i][j]); } printf("\n"); } } void DestroyMatrix(MATRIX *m) { int i; for (i = 0; i < m->nrows; ++i) { free(m->mel[i]); } free(m->mel); free(m); }
tsemf_share.c
// SPDX-License-Identifier: BSD-2-Clause /* Copyright 2015-2018 Bernard Parent Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <cycle/tsemf/_tsemf.h> #include <cycle/share/cycle_share.h> #include <cycle/share/tsemf_share.h> #include <src/bdry.h> #ifdef EMFIELD void find_tsemf_SOR_numsubiter_numcycle(long numsubitertot,long numsubiteropt, long *numsubiter, long *numcycle){ *numsubiter=min(numsubiteropt,numsubitertot/2); if (*numsubiter==numsubiteropt) *numcycle=max(1,round((double)numsubitertot*0.5/(double)numsubiteropt)); else *numcycle=1; *numsubiter=numsubitertot/(2*(*numcycle)); } void update_U_from_dUstar_emfield(np_t *np, gl_t *gl, long theta, long ls, long le){ long l,flux; for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){ for (flux=0; flux<nfe; flux++) np[l].bs->dUstaremfield[flux]*=gl->relaxEMF; add_dUstar_to_U_emfield(np, gl, l, np[l].bs->dUstaremfield); thread_lock_global_set(gl,THREADTYPE_ALL); gl->effiter_U_emfield+=1.0e0/(double)gl->nn; thread_lock_global_unset(gl,THREADTYPE_ALL); } } void update_U_from_dUstar_emfield_without_relaxation(np_t *np, gl_t *gl, long theta, long ls, long le){ long l,flux; for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){ for (flux=0; flux<nfe; flux++) np[l].bs->dUstaremfield[flux]*=gl->relaxEMF; add_dUstar_to_U_emfield(np, gl, l, np[l].bs->dUstaremfield); thread_lock_global_set(gl,THREADTYPE_ALL); gl->effiter_U_emfield+=1.0e0/(double)gl->nn; thread_lock_global_unset(gl,THREADTYPE_ALL); } } void init_dUstar_emfield_ADI(np_t *np, gl_t *gl, long theta, long ls, long le){ long l,flux; double dtau; for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){ assert_np(np[l],is_node_inner(np[l],TYPELEVEL_EMFIELD)); for (flux=0; flux<nfe; flux++) { find_dtau_emfield(np,gl,l,flux,&dtau); np[l].bs->dUstaremfield[flux]=-np[l].bs->Resemfield[flux]*dtau; } } } void update_dUstar_emfield_ADI(np_t *np, gl_t *gl, long theta, long ls, long le){ long jj,l,flux,cnt; EXM_tdmaline_t *tdma; double dtau; cnt=0; for(l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)) cnt++; tdma=(EXM_tdmaline_t 
*)malloc((cnt+5)*sizeof(EXM_tdmaline_t)); for (flux=0; flux<nfe; flux++){ jj=0; for(l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)) { jj++; find_dtau_emfield(np,gl,l,flux,&dtau); find_linearization_coefficients_inner_node_emfield(np, gl, l, theta, flux, &(tdma[jj].val[0]), &(tdma[jj].val[1]), &(tdma[jj].val[2])); tdma[jj].val[1]+=1.0/dtau; tdma[jj].val[3]=np[l].bs->dUstaremfield[flux]/dtau; } tdma[0].val[0]=0.0; find_linearization_coefficients_bdry_node_emfield(np, gl, _l_minus_one(ls,gl,theta), theta, +1, flux, _node_type(np[_l_minus_one(ls,gl,theta)], TYPELEVEL_EMFIELD), &(tdma[0].val[1]), &(tdma[0].val[2]), &(tdma[0].val[3])); tdma[jj+1].val[2]=0.0; find_linearization_coefficients_bdry_node_emfield(np, gl, _l_plus_one(le,gl,theta), theta, -1, flux, _node_type(np[_l_plus_one(le,gl,theta)], TYPELEVEL_EMFIELD), &(tdma[jj+1].val[1]), &(tdma[jj+1].val[0]), &(tdma[jj+1].val[3])); /* solve the TDMA */ solve_TDMA_emfield(np, gl, theta, _l_minus_one(ls,gl,theta), _l_plus_one(le,gl,theta), TYPELEVEL_EMFIELD, tdma, jj+2); /*--------Here: add RHS of TDMA to Ustar. 
*/ jj=-1;
 for (l=_l_minus_one(ls,gl,theta); l!=_l_plus_one(_l_plus_one(le,gl,theta),gl,theta); l=_l_plus_one(l,gl,theta)){
   jj++;
   /* back-substituted tridiagonal solution: rhs (val[3]) over pivot (val[1]) */
   np[l].bs->dUstaremfield[flux]=tdma[jj].val[3]/tdma[jj].val[1];
 }
 }
 free(tdma);
}


/* Advance the emfield conserved variables over zone with the ADI (approximate
   factorization) pseudotime scheme: initialize dUstar from the residual, apply
   the factored 1D updates along each dimension, then add dUstar to U. */
void update_U_emfield_ADI(np_t *np, gl_t *gl, zone_t zone){
  sweep_with_1D_segments(np,gl,zone,&init_dUstar_emfield_ADI,SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_inner,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
  sweep_with_1D_segments(np,gl,zone,&update_dUstar_emfield_ADI,SWEEPTYPE_IJK, TYPELEVEL_EMFIELD,&is_node_inner,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
  sweep_with_1D_segments(np,gl,zone,&update_U_from_dUstar_emfield,SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_inner,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
}


#ifdef _TSEMF_STORE_COEFFICIENTS

/* Zero dUstaremfield for every flux at each node of the 1D segment ls..le
   along dimension theta, prior to the SOR iterations. */
static void init_dUstar_emfield_SOR(np_t *np, gl_t *gl, long theta, long ls, long le){
  long l,flux;
  for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){
    assert_np(np[l],is_node_valid(np[l],TYPELEVEL_EMFIELD));
    for (flux=0; flux<nfe; flux++) {
      np[l].bs->dUstaremfield[flux]=0.0;
    }
  }
}


#define SOR_SWEEP_FORWARD 1
#define SOR_SWEEP_BACKWARD 2

#ifdef DISTMPI

/* Perform one SOR relaxation of dUstaremfield[flux] at node l, using the
   linearization coefficients stored in np[l].bs.  SOR_SWEEP (forward or
   backward) only affects the NDEBUG sanity checks, which verify that the
   wavefront ordering of the sweep has been respected (upwind neighbours
   already updated, downwind neighbours not yet). */
void update_dUstar_emfield_SOR_node(np_t *np, gl_t *gl, long l, long flux, int SOR_SWEEP){
  long dim,theta,thetasgn;
  double sum,RHS,Cp0,Cp1,dtau;
#ifndef NDEBUG
  long i,j,k;
#endif
  if (is_node_valid(np[l],TYPELEVEL_EMFIELD)) {
    if (is_node_inner(np[l],TYPELEVEL_EMFIELD)) {
      // for inner node
      /* sum = -residual minus off-diagonal contributions of the neighbours */
      sum=-np[l].bs->Resemfield[flux];
      for (dim=0; dim<nd; dim++){
        sum-=np[l].bs->coeffp1[dim][flux]*np[_al(gl,l,dim,+1)].bs->dUstaremfield[flux] +np[l].bs->coeffm1[dim][flux]*np[_al(gl,l,dim,-1)].bs->dUstaremfield[flux];
      }
#ifndef NDEBUG
      /* verify the sweep ordering: the nodes behind the wavefront must be
         flagged TSEMF_UPDATED, the nodes ahead of it must not */
      find_ijk_from_l(gl,l,&i,&j,&k);
      switch (SOR_SWEEP){
        case SOR_SWEEP_FORWARD:
          if (!np[_ai(gl,i-1,j,k)].bs->TSEMF_UPDATED) fatal_error("Node not updated at i=%ld j=%ld k=%ld in update_dUstar_emfield_SOR_node()\n",i-1,j,k);
          if (np[_ai(gl,i+1,j,k)].bs->TSEMF_UPDATED) fatal_error("Node updated at i=%ld j=%ld k=%ld in update_dUstar_emfield_SOR_node()\n",i+1,j,k);
          if (!np[_ai(gl,i,j-1,k)].bs->TSEMF_UPDATED) fatal_error("Node not updated at i=%ld j=%ld k=%ld in update_dUstar_emfield_SOR_node()\n",i,j-1,k);
          if (np[_ai(gl,i,j+1,k)].bs->TSEMF_UPDATED) fatal_error("Node updated at i=%ld j=%ld k=%ld in update_dUstar_emfield_SOR_node()\n",i,j+1,k);
#ifdef _3DL
          if (!np[_ai(gl,i,j,k-1)].bs->TSEMF_UPDATED) fatal_error("Node not updated at i=%ld j=%ld k=%ld in update_dUstar_emfield_SOR_node()\n",i,j,k-1);
          if (np[_ai(gl,i,j,k+1)].bs->TSEMF_UPDATED) fatal_error("Node updated at i=%ld j=%ld k=%ld in update_dUstar_emfield_SOR_node()\n",i,j,k+1);
#endif
          break;
        case SOR_SWEEP_BACKWARD:
          if (!np[_ai(gl,i+1,j,k)].bs->TSEMF_UPDATED) fatal_error("Node not updated at i=%ld j=%ld k=%ld in update_dUstar_emfield_SOR_node()\n",i+1,j,k);
          if (np[_ai(gl,i-1,j,k)].bs->TSEMF_UPDATED) fatal_error("Node updated at i=%ld j=%ld k=%ld in update_dUstar_emfield_SOR_node()\n",i-1,j,k);
          if (!np[_ai(gl,i,j+1,k)].bs->TSEMF_UPDATED) fatal_error("Node not updated at i=%ld j=%ld k=%ld in update_dUstar_emfield_SOR_node()\n",i,j+1,k);
          if (np[_ai(gl,i,j-1,k)].bs->TSEMF_UPDATED) fatal_error("Node updated at i=%ld j=%ld k=%ld in update_dUstar_emfield_SOR_node()\n",i,j-1,k);
#ifdef _3DL
          if (!np[_ai(gl,i,j,k+1)].bs->TSEMF_UPDATED) fatal_error("Node not updated at i=%ld j=%ld k=%ld in update_dUstar_emfield_SOR_node()\n",i,j,k+1);
          if (np[_ai(gl,i,j,k-1)].bs->TSEMF_UPDATED) fatal_error("Node updated at i=%ld j=%ld k=%ld in update_dUstar_emfield_SOR_node()\n",i,j,k-1);
#endif
          break;
        default:
          fatal_error("SOR_SWEEP must be set to either SOR_SWEEP_BACKWARD or SOR_SWEEP_FORWARD");
      }
#endif
      /* relaxed update: blend the old dUstar with the Gauss-Seidel value
         using the over/under-relaxation factor relaxEMF */
      dtau=np[l].bs->dtauemfield[flux];
      RHS=(1.0-gl->relaxEMF)*np[l].bs->dUstaremfield[flux]+gl->relaxEMF/(np[l].bs->coeffp0sum[flux]+1.0/dtau)*sum;
      np[l].bs->dUstaremfield[flux]=RHS;
    } else {
      // for bdry node
      if (find_bdry_direc(np, gl, l, TYPELEVEL_EMFIELD, &theta, &thetasgn)){
        find_linearization_coefficients_bdry_node_emfield(np, gl, l, theta, thetasgn, flux, _node_type(np[l], TYPELEVEL_EMFIELD), &Cp0, &Cp1,&sum);
        sum-=Cp1*np[_al(gl,l,theta,thetasgn)].bs->dUstaremfield[flux];
        RHS=(1.0-gl->relaxEMF)*np[l].bs->dUstaremfield[flux]+gl->relaxEMF/Cp0*sum;
        np[l].bs->dUstaremfield[flux]=RHS;
#ifndef NDEBUG
        /* the single interior neighbour of a boundary node must have been
           updated (or not) consistently with the sweep direction */
        switch (SOR_SWEEP){
          case SOR_SWEEP_FORWARD:
            if (thetasgn>0){
              if (np[_al(gl,l,theta,+1)].bs->TSEMF_UPDATED) fatal_error("Near-bdry node wrongly updated in update_dUstar_emfield_SOR_node()");
            } else {
              if (!np[_al(gl,l,theta,-1)].bs->TSEMF_UPDATED) fatal_error("Near-bdry node not updated in update_dUstar_emfield_SOR_node()");
            }
            break;
          case SOR_SWEEP_BACKWARD:
            if (thetasgn>0){
              if (!np[_al(gl,l,theta,+1)].bs->TSEMF_UPDATED) fatal_error("Near-bdry node not updated in update_dUstar_emfield_SOR_node()");
            } else {
              if (np[_al(gl,l,theta,-1)].bs->TSEMF_UPDATED) fatal_error("Near-bdry node wrongly updated in update_dUstar_emfield_SOR_node()");
            }
            break;
          default:
            fatal_error("SOR_SWEEP must be set to either SOR_SWEEP_BACKWARD or SOR_SWEEP_FORWARD");
        }
#endif
      }
    }
#ifndef NDEBUG
    np[l].bs->TSEMF_UPDATED=TRUE;
#endif
  }
}


/* Gather dUstaremfield[flux] of every valid emfield node in the box
   (is..ie, js..je, ks..ke) into *mpivars (grown with realloc; caller owns
   and frees it) and return the count through *cntvars, for an MPI send. */
static void find_mpivars_in_zone(np_t *np, gl_t *gl, long is, long js, long ks, long ie, long je, long ke, long flux, int *cntvars, double **mpivars){
  long i,j,k;
  *cntvars=0;
  for_1DL(i,is,ie){
    for_2DL(j,js,je){
      for_3DL(k,ks,ke){
        if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)){
          (*mpivars)=(double *)realloc(*mpivars,sizeof(double)*(*cntvars+1));
          (*mpivars)[*cntvars]=np[_ai(gl,i,j,k)].bs->dUstaremfield[flux];
          (*cntvars)++;
        }
      }
    }
  }
}


/* Inverse of find_mpivars_in_zone: scatter the numvars values received in
   mpivars back into dUstaremfield[flux] over the same box, visiting nodes in
   the same order so the packing matches. */
static void copy_mpivars_in_zone(np_t *np, gl_t *gl, long is, long js, long ks, long ie, long je, long ke, long flux, int numvars, double *mpivars){
  long i,j,k;
  int cntvars;
  cntvars=0;
  for_1DL(i,is,ie){
    for_2DL(j,js,je){
      for_3DL(k,ks,ke){
        if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)){
          np[_ai(gl,i,j,k)].bs->dUstaremfield[flux]=mpivars[cntvars];
          cntvars++;
#ifndef NDEBUG
          np[_ai(gl,i,j,k)].bs->TSEMF_UPDATED=TRUE;
#endif
        }
      }
    }
  }
#ifndef NDEBUG
  if (cntvars!=numvars) printf("cntvars=%d numvars=%d\n",cntvars,numvars);
assert(cntvars==numvars); #endif } static void update_dUstar_emfield_SOR_forward(np_t *np, gl_t *gl, long flux, zone_t zone, long numiter){ long i,iter,cnt; long j,k,plane,planestart,planeend,jplusk; #ifdef _3D long *lplane[((zone.ie-zone.is+1)+(zone.je-zone.js+1))+(zone.ke-zone.ks+1)+1]; #endif #ifdef _2D long *lplane[((zone.ie-zone.is+1)+(zone.je-zone.js+1))+1]; #endif int numvars,rank,thisrank; double *mpivars; int packsize,buffersize,bbuffersize; double *buffer,*bbuffer; MPI_Status MPI_Status1; MPI_Comm_rank(MPI_COMM_WORLD, &thisrank); MPI_Pack_size( 1, MPI_DOUBLE, MPI_COMM_WORLD, &packsize ); buffersize = (zone.ie-zone.is)*(zone.je-zone.js)if3DL(*(zone.ke-zone.ks)) * (MPI_BSEND_OVERHEAD + packsize); buffer = (double *)malloc( buffersize ); mpivars=(double *)malloc(sizeof(double)); /* find lplane */ planestart=1; planeend=((zone.ie-zone.is+1)+(zone.je-zone.js+1)); #ifdef _3DL planeend+=(zone.ke-zone.ks+1); #endif for (plane=planestart; plane<=planeend; plane++){ lplane[plane]=(long *)malloc(sizeof(long)); cnt=0; lplane[plane][0]=0; for (i=zone.is; i<=zone.ie; i++){ jplusk=plane-(i-zone.is+1); #ifdef _3DL for (k=zone.ks; k<=zone.ke; k++){ j=jplusk-(k-zone.ks)+zone.js; #else k=0; j=jplusk+zone.js; #endif if (j>=zone.js && j<=zone.je){ cnt++; lplane[plane]=(long *)realloc(lplane[plane],sizeof(long)*(cnt+1)); lplane[plane][0]=cnt; lplane[plane][cnt]=_ai(gl,i,j,k); } #ifdef _3DL } #endif } } for (iter=0; iter<numiter; iter++) { MPI_Buffer_attach( buffer, buffersize ); #ifndef NDEBUG for_ijk(zone,is-1,js-1,ks-1,ie+1,je+1,ke+1){ np[_ai(gl,i,j,k)].bs->TSEMF_UPDATED=FALSE; } #endif /* receive data from other processes before the threading starts */ rank=_node_rank(gl,zone.is-1,zone.js,zone.ks); if (rank!=thisrank){ if (MPI_Recv(&numvars,1,MPI_INT,rank,0,MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in update_dUstar_emfield_SOR"); mpivars=(double *)realloc(mpivars,sizeof(double)*numvars); if 
(MPI_Recv(mpivars,numvars,MPI_DOUBLE,rank,0,MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in update_dUstar_emfield_SOR"); copy_mpivars_in_zone(np,gl,zone.is-1,zone.js,zone.ks,zone.is-1,zone.je,zone.ke, flux, numvars,mpivars); } rank=_node_rank(gl,zone.is,zone.js-1,zone.ks); if (rank!=thisrank){ if (MPI_Recv(&numvars,1,MPI_INT,rank,0,MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in update_dUstar_emfield_SOR"); mpivars=(double *)realloc(mpivars,sizeof(double)*numvars); if (MPI_Recv(mpivars,numvars,MPI_DOUBLE,rank,0,MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in update_dUstar_emfield_SOR"); copy_mpivars_in_zone(np,gl,zone.is,zone.js-1,zone.ks,zone.ie,zone.js-1,zone.ke, flux, numvars,mpivars); } #ifdef _3DL rank=_node_rank(gl,zone.is,zone.js,zone.ks-1); if (rank!=thisrank){ if (MPI_Recv(&numvars,1,MPI_INT,rank,0,MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in update_dUstar_emfield_SOR"); mpivars=(double *)realloc(mpivars,sizeof(double)*numvars); if (MPI_Recv(mpivars,numvars,MPI_DOUBLE,rank,0,MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in update_dUstar_emfield_SOR"); copy_mpivars_in_zone(np,gl,zone.is,zone.js,zone.ks-1,zone.ie,zone.je,zone.ks-1, flux, numvars,mpivars); } #endif /* the threaded loop */ for (plane=planestart; plane<=planeend; plane++){ #ifdef OPENMPTHREADS #pragma omp parallel for private(cnt) schedule(static) #endif for (cnt=1; cnt<=lplane[plane][0]; cnt++){ update_dUstar_emfield_SOR_node(np, gl, lplane[plane][cnt], flux, SOR_SWEEP_FORWARD); } } /* exchange data with other processes after the threading */ find_mpivars_in_zone(np,gl,zone.ie,zone.js,zone.ks,zone.ie,zone.je,zone.ke, flux, &numvars,&mpivars); rank=_node_rank(gl,zone.ie+1,zone.js,zone.ks); if (rank!=thisrank){ if (MPI_Bsend(&numvars,1,MPI_INT,rank,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in update_dUstar_emfield_SOR"); if 
(MPI_Bsend(mpivars,numvars,MPI_DOUBLE,rank,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in update_dUstar_emfield_SOR"); } find_mpivars_in_zone(np,gl,zone.is,zone.js,zone.ks,zone.is,zone.je,zone.ke, flux, &numvars,&mpivars); rank=_node_rank(gl,zone.is-1,zone.js,zone.ks); if (rank!=thisrank){ if (MPI_Bsend(&numvars,1,MPI_INT,rank,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in update_dUstar_emfield_SOR"); if (MPI_Bsend(mpivars,numvars,MPI_DOUBLE,rank,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in update_dUstar_emfield_SOR"); } find_mpivars_in_zone(np,gl,zone.is,zone.je,zone.ks,zone.ie,zone.je,zone.ke, flux, &numvars,&mpivars); rank=_node_rank(gl,zone.is,zone.je+1,zone.ks); if (rank!=thisrank){ if (MPI_Bsend(&numvars,1,MPI_INT,rank,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in update_dUstar_emfield_SOR"); if (MPI_Bsend(mpivars,numvars,MPI_DOUBLE,rank,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in update_dUstar_emfield_SOR"); } find_mpivars_in_zone(np,gl,zone.is,zone.js,zone.ks,zone.ie,zone.js,zone.ke, flux, &numvars,&mpivars); rank=_node_rank(gl,zone.is,zone.js-1,zone.ks); if (rank!=thisrank){ if (MPI_Bsend(&numvars,1,MPI_INT,rank,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in update_dUstar_emfield_SOR"); if (MPI_Bsend(mpivars,numvars,MPI_DOUBLE,rank,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in update_dUstar_emfield_SOR"); } #ifdef _3DL find_mpivars_in_zone(np,gl,zone.is,zone.js,zone.ke,zone.ie,zone.je,zone.ke, flux, &numvars,&mpivars); rank=_node_rank(gl,zone.is,zone.js,zone.ke+1); if (rank!=thisrank){ if (MPI_Bsend(&numvars,1,MPI_INT,rank,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in update_dUstar_emfield_SOR"); if (MPI_Bsend(mpivars,numvars,MPI_DOUBLE,rank,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in update_dUstar_emfield_SOR"); } 
find_mpivars_in_zone(np,gl,zone.is,zone.js,zone.ks,zone.ie,zone.je,zone.ks, flux, &numvars,&mpivars); rank=_node_rank(gl,zone.is,zone.js,zone.ks-1); if (rank!=thisrank){ if (MPI_Bsend(&numvars,1,MPI_INT,rank,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in update_dUstar_emfield_SOR"); if (MPI_Bsend(mpivars,numvars,MPI_DOUBLE,rank,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in update_dUstar_emfield_SOR"); } #endif rank=_node_rank(gl,zone.ie+1,zone.js,zone.ks); if (rank!=thisrank){ if (MPI_Recv(&numvars,1,MPI_INT,rank,0,MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in update_dUstar_emfield_SOR"); mpivars=(double *)realloc(mpivars,sizeof(double)*numvars); if (MPI_Recv(mpivars,numvars,MPI_DOUBLE,rank,0,MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in update_dUstar_emfield_SOR"); copy_mpivars_in_zone(np,gl,zone.ie+1,zone.js,zone.ks,zone.ie+1,zone.je,zone.ke, flux, numvars,mpivars); } rank=_node_rank(gl,zone.is,zone.je+1,zone.ks); if (rank!=thisrank){ if (MPI_Recv(&numvars,1,MPI_INT,rank,0,MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in update_dUstar_emfield_SOR"); mpivars=(double *)realloc(mpivars,sizeof(double)*numvars); if (MPI_Recv(mpivars,numvars,MPI_DOUBLE,rank,0,MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in update_dUstar_emfield_SOR"); copy_mpivars_in_zone(np,gl,zone.is,zone.je+1,zone.ks,zone.ie,zone.je+1,zone.ke, flux, numvars,mpivars); } #ifdef _3DL rank=_node_rank(gl,zone.is,zone.js,zone.ke+1); if (rank!=thisrank){ if (MPI_Recv(&numvars,1,MPI_INT,rank,0,MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in update_dUstar_emfield_SOR"); mpivars=(double *)realloc(mpivars,sizeof(double)*numvars); if (MPI_Recv(mpivars,numvars,MPI_DOUBLE,rank,0,MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in update_dUstar_emfield_SOR"); 
copy_mpivars_in_zone(np,gl,zone.is,zone.js,zone.ke+1,zone.ie,zone.je,zone.ke+1, flux, numvars,mpivars); } #endif #ifndef NDEBUG for_ijk(zone,is,js,ks,ie,je,ke){ if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD) && !np[_ai(gl,i,j,k)].bs->TSEMF_UPDATED) fatal_error("Node not updated correctly at i=%ld j=%ld k=%ld.",i,j,k); } #endif MPI_Buffer_detach( &bbuffer, &bbuffersize ); } for (plane=planestart; plane<=planeend; plane++){ free(lplane[plane]); } MPI_Barrier(MPI_COMM_WORLD); free(buffer); free(mpivars); } static void update_dUstar_emfield_SOR_backward(np_t *np, gl_t *gl, long flux, zone_t zone, long numiter){ long i,iter,cnt; long j,k,plane,planestart,planeend,jplusk; #ifdef _3D long *lplane[((zone.ie-zone.is+1)+(zone.je-zone.js+1))+(zone.ke-zone.ks+1)+1]; #endif #ifdef _2D long *lplane[((zone.ie-zone.is+1)+(zone.je-zone.js+1))+1]; #endif int numvars,rank,thisrank; double *mpivars; int packsize,buffersize,bbuffersize; double *buffer,*bbuffer; MPI_Status MPI_Status1; MPI_Comm_rank(MPI_COMM_WORLD, &thisrank); MPI_Pack_size( 1, MPI_DOUBLE, MPI_COMM_WORLD, &packsize ); buffersize = min(INT_MAX,(zone.ie-zone.is)*(zone.je-zone.js)if3DL(*(zone.ke-zone.ks)) * (MPI_BSEND_OVERHEAD + packsize)); buffer = (double *)malloc( buffersize ); mpivars=(double *)malloc(sizeof(double)); planestart=1; planeend=((zone.ie-zone.is+1)+(zone.je-zone.js+1)); #ifdef _3DL planeend+=(zone.ke-zone.ks+1); #endif for (plane=planeend; plane>=planestart; plane--){ lplane[plane]=(long *)malloc(sizeof(long)); cnt=0; lplane[plane][0]=0; for (i=zone.ie; i>=zone.is; i--){ jplusk=plane-(i-zone.is+1); #ifdef _3DL for (k=zone.ke; k>=zone.ks; k--){ j=jplusk-(k-zone.ks)+zone.js; #else k=0; j=jplusk+zone.js; #endif if (j>=zone.js && j<=zone.je){ cnt++; lplane[plane]=(long *)realloc(lplane[plane],sizeof(long)*(cnt+1)); lplane[plane][0]=cnt; lplane[plane][cnt]=_ai(gl,i,j,k); } #ifdef _3DL } #endif } } for (iter=0; iter<numiter; iter++) { MPI_Buffer_attach( buffer, buffersize ); #ifndef NDEBUG 
    /* reset the debug "updated" flags over the zone plus a one-node halo */
    for_ijk(zone,is-1,js-1,ks-1,ie+1,je+1,ke+1){
      np[_ai(gl,i,j,k)].bs->TSEMF_UPDATED=FALSE;
    }
#endif
    /* receive data from other processes before the threading starts */
    rank=_node_rank(gl,zone.ie+1,zone.js,zone.ks);
    if (rank!=thisrank){
      if (MPI_Recv(&numvars,1,MPI_INT,rank,0,MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in update_dUstar_emfield_SOR");
      mpivars=(double *)realloc(mpivars,sizeof(double)*numvars);
      if (MPI_Recv(mpivars,numvars,MPI_DOUBLE,rank,0,MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in update_dUstar_emfield_SOR");
      copy_mpivars_in_zone(np,gl,zone.ie+1,zone.js,zone.ks,zone.ie+1,zone.je,zone.ke, flux, numvars,mpivars);
    }
    rank=_node_rank(gl,zone.is,zone.je+1,zone.ks);
    if (rank!=thisrank){
      if (MPI_Recv(&numvars,1,MPI_INT,rank,0,MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in update_dUstar_emfield_SOR");
      mpivars=(double *)realloc(mpivars,sizeof(double)*numvars);
      if (MPI_Recv(mpivars,numvars,MPI_DOUBLE,rank,0,MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in update_dUstar_emfield_SOR");
      copy_mpivars_in_zone(np,gl,zone.is,zone.je+1,zone.ks,zone.ie,zone.je+1,zone.ke, flux, numvars,mpivars);
    }
#ifdef _3DL
    rank=_node_rank(gl,zone.is,zone.js,zone.ke+1);
    if (rank!=thisrank){
      if (MPI_Recv(&numvars,1,MPI_INT,rank,0,MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in update_dUstar_emfield_SOR");
      mpivars=(double *)realloc(mpivars,sizeof(double)*numvars);
      if (MPI_Recv(mpivars,numvars,MPI_DOUBLE,rank,0,MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in update_dUstar_emfield_SOR");
      copy_mpivars_in_zone(np,gl,zone.is,zone.js,zone.ke+1,zone.ie,zone.je,zone.ke+1, flux, numvars,mpivars);
    }
#endif
    /* the threaded loop: planes traversed in decreasing order (backward
       wavefront); nodes within one plane are mutually independent */
    for (plane=planeend; plane>=planestart; plane--){
#ifdef OPENMPTHREADS
#pragma omp parallel for private(cnt) schedule(static)
#endif
      for (cnt=1; cnt<=lplane[plane][0]; cnt++){
        update_dUstar_emfield_SOR_node(np, gl, lplane[plane][cnt], flux, SOR_SWEEP_BACKWARD);
      }
    }
    /* exchange data with other processes after the threading */
    find_mpivars_in_zone(np,gl,zone.is,zone.js,zone.ks,zone.is,zone.je,zone.ke, flux, &numvars,&mpivars);
    rank=_node_rank(gl,zone.is-1,zone.js,zone.ks);
    if (rank!=thisrank){
      if (MPI_Bsend(&numvars,1,MPI_INT,rank,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in update_dUstar_emfield_SOR");
      if (MPI_Bsend(mpivars,numvars,MPI_DOUBLE,rank,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in update_dUstar_emfield_SOR");
    }
    find_mpivars_in_zone(np,gl,zone.ie,zone.js,zone.ks,zone.ie,zone.je,zone.ke, flux, &numvars,&mpivars);
    rank=_node_rank(gl,zone.ie+1,zone.js,zone.ks);
    if (rank!=thisrank){
      if (MPI_Bsend(&numvars,1,MPI_INT,rank,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in update_dUstar_emfield_SOR");
      if (MPI_Bsend(mpivars,numvars,MPI_DOUBLE,rank,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in update_dUstar_emfield_SOR");
    }
    find_mpivars_in_zone(np,gl,zone.is,zone.js,zone.ks,zone.ie,zone.js,zone.ke, flux, &numvars,&mpivars);
    rank=_node_rank(gl,zone.is,zone.js-1,zone.ks);
    if (rank!=thisrank){
      if (MPI_Bsend(&numvars,1,MPI_INT,rank,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in update_dUstar_emfield_SOR");
      if (MPI_Bsend(mpivars,numvars,MPI_DOUBLE,rank,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in update_dUstar_emfield_SOR");
    }
    find_mpivars_in_zone(np,gl,zone.is,zone.je,zone.ks,zone.ie,zone.je,zone.ke, flux, &numvars,&mpivars);
    rank=_node_rank(gl,zone.is,zone.je+1,zone.ks);
    if (rank!=thisrank){
      if (MPI_Bsend(&numvars,1,MPI_INT,rank,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in update_dUstar_emfield_SOR");
      if (MPI_Bsend(mpivars,numvars,MPI_DOUBLE,rank,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in update_dUstar_emfield_SOR");
    }
#ifdef _3DL
    find_mpivars_in_zone(np,gl,zone.is,zone.js,zone.ks,zone.ie,zone.je,zone.ks, flux, &numvars,&mpivars);
    rank=_node_rank(gl,zone.is,zone.js,zone.ks-1);
    if (rank!=thisrank){
      if (MPI_Bsend(&numvars,1,MPI_INT,rank,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in update_dUstar_emfield_SOR");
      if (MPI_Bsend(mpivars,numvars,MPI_DOUBLE,rank,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in update_dUstar_emfield_SOR");
    }
    find_mpivars_in_zone(np,gl,zone.is,zone.js,zone.ke,zone.ie,zone.je,zone.ke, flux, &numvars,&mpivars);
    rank=_node_rank(gl,zone.is,zone.js,zone.ke+1);
    if (rank!=thisrank){
      if (MPI_Bsend(&numvars,1,MPI_INT,rank,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in update_dUstar_emfield_SOR");
      if (MPI_Bsend(mpivars,numvars,MPI_DOUBLE,rank,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in update_dUstar_emfield_SOR");
    }
#endif
    rank=_node_rank(gl,zone.is-1,zone.js,zone.ks);
    if (rank!=thisrank){
      if (MPI_Recv(&numvars,1,MPI_INT,rank,0,MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in update_dUstar_emfield_SOR");
      mpivars=(double *)realloc(mpivars,sizeof(double)*numvars);
      if (MPI_Recv(mpivars,numvars,MPI_DOUBLE,rank,0,MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in update_dUstar_emfield_SOR");
      copy_mpivars_in_zone(np,gl,zone.is-1,zone.js,zone.ks,zone.is-1,zone.je,zone.ke, flux, numvars,mpivars);
    }
    rank=_node_rank(gl,zone.is,zone.js-1,zone.ks);
    if (rank!=thisrank){
      if (MPI_Recv(&numvars,1,MPI_INT,rank,0,MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in update_dUstar_emfield_SOR");
      mpivars=(double *)realloc(mpivars,sizeof(double)*numvars);
      if (MPI_Recv(mpivars,numvars,MPI_DOUBLE,rank,0,MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in update_dUstar_emfield_SOR");
      copy_mpivars_in_zone(np,gl,zone.is,zone.js-1,zone.ks,zone.ie,zone.js-1,zone.ke, flux, numvars,mpivars);
    }
#ifdef _3DL
    rank=_node_rank(gl,zone.is,zone.js,zone.ks-1);
    if (rank!=thisrank){
      if (MPI_Recv(&numvars,1,MPI_INT,rank,0,MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in update_dUstar_emfield_SOR");
      mpivars=(double *)realloc(mpivars,sizeof(double)*numvars);
      if (MPI_Recv(mpivars,numvars,MPI_DOUBLE,rank,0,MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in update_dUstar_emfield_SOR");
      copy_mpivars_in_zone(np,gl,zone.is,zone.js,zone.ks-1,zone.ie,zone.je,zone.ks-1, flux, numvars,mpivars);
    }
#endif
#ifndef NDEBUG
    for_ijk(zone,is,js,ks,ie,je,ke){
      if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD) && !np[_ai(gl,i,j,k)].bs->TSEMF_UPDATED) fatal_error("Node not updated correctly at i=%ld j=%ld k=%ld.",i,j,k);
    }
#endif
    MPI_Buffer_detach( &bbuffer, &bbuffersize );
  }
  for (plane=planestart; plane<=planeend; plane++){
    free(lplane[plane]);
  }
  MPI_Barrier(MPI_COMM_WORLD);
  free(buffer);
  free(mpivars);
}


/* DISTMPI driver: alternate numcycle forward/backward SOR sweep passes,
   each made of numsubiter sub-iterations as determined from the
   user-specified numsubiter_tsemf. */
void update_dUstar_emfield_SOR(np_t *np, gl_t *gl, long flux, zone_t zone){
  long cycle,numcycle,numsubiter;
  find_tsemf_SOR_numsubiter_numcycle(gl->numsubiter_tsemf, TSEMF_SOR_NUMSUBITEROPT, &numsubiter, &numcycle);
  for (cycle=0; cycle<numcycle; cycle++){
    update_dUstar_emfield_SOR_forward(np, gl, flux, zone, numsubiter);
    update_dUstar_emfield_SOR_backward(np, gl, flux, zone, numsubiter);
  }
}

#else //not DISTMPI

/* Relax dUstaremfield[flux] at every node of the i=const station.  For a
   forward sweep the (j,k) plane is traversed in increasing order; for a
   backward sweep the traversal is mirrored about the zone centre.
   NOTE(review): the iter argument is currently unused here -- confirm. */
void update_dUstar_emfield_SOR_istation(np_t *np, gl_t *gl, long flux, long i, zone_t zone, int SOR_SWEEP, long iter){
  long j,k,l,dim,theta,thetasgn;
  double sum,RHS,Cp0,Cp1,dtau;
  for_2DL(j,zone.js,zone.je){
    for_3DL(k,zone.ks,zone.ke){
      l=0; // to avoid compiler warning
      switch (SOR_SWEEP) {
        case SOR_SWEEP_FORWARD:
          l=_ai(gl,i,j,k);
          break;
        case SOR_SWEEP_BACKWARD:
          l=_ai(gl,i,zone.je-(j-zone.js),zone.ke-(k-zone.ks));
          break;
        default:
          fatal_error("SOR_SWEEP must be set to either SOR_SWEEP_FORWARD or SOR_SWEEP_BACKWARD in update_dUstar_emfield_SOR_istation.");
      }
      if (is_node_valid(np[l],TYPELEVEL_EMFIELD)) {
        if
(is_node_inner(np[l],TYPELEVEL_EMFIELD)) {
          /* for inner node */
          /* sum = -residual minus the off-diagonal neighbour contributions */
          sum=-np[l].bs->Resemfield[flux];
          for (dim=0; dim<nd; dim++){
            sum-=np[l].bs->coeffp1[dim][flux]*np[_al(gl,l,dim,+1)].bs->dUstaremfield[flux] +np[l].bs->coeffm1[dim][flux]*np[_al(gl,l,dim,-1)].bs->dUstaremfield[flux];
          }
          /* relaxed Gauss-Seidel update with relaxation factor relaxEMF */
          dtau=np[l].bs->dtauemfield[flux];
          RHS=(1.0-gl->relaxEMF)*np[l].bs->dUstaremfield[flux]+gl->relaxEMF/(np[l].bs->coeffp0sum[flux]+1.0/dtau)*sum;
          np[l].bs->dUstaremfield[flux]=RHS;
        } else {
          /* for bdry node */
          if (find_bdry_direc(np, gl, l, TYPELEVEL_EMFIELD, &theta, &thetasgn)){
            find_linearization_coefficients_bdry_node_emfield(np, gl, l, theta, thetasgn, flux, _node_type(np[l], TYPELEVEL_EMFIELD), &Cp0, &Cp1,&sum);
            sum-=Cp1*np[_al(gl,l,theta,thetasgn)].bs->dUstaremfield[flux];
            RHS=(1.0-gl->relaxEMF)*np[l].bs->dUstaremfield[flux]+gl->relaxEMF/Cp0*sum;
            np[l].bs->dUstaremfield[flux]=RHS;
          }
        }
      }
    }
  }
}


/* Shared-memory driver: pipeline numsubiter SOR sub-iterations over the i
   stations, forward then backward.  Stations worked on concurrently are kept
   two apart (ilocal=i-cnt*2 resp. i+cnt*2) so the OpenMP threads never touch
   adjacent stations at the same time. */
void update_dUstar_emfield_SOR(np_t *np, gl_t *gl, long flux, zone_t zone){
  long i,cnt,ilocal,numsubiter,numcycle,cycle;
  find_tsemf_SOR_numsubiter_numcycle(gl->numsubiter_tsemf, TSEMF_SOR_NUMSUBITEROPT, &numsubiter, &numcycle);
  for (cycle=0; cycle<numcycle; cycle++){
    for (i=zone.is; i<=zone.ie+(numsubiter-1)*2; i++){
#ifdef OPENMPTHREADS
#pragma omp parallel for private(cnt,ilocal) schedule(dynamic)
#endif
      for (cnt=0; cnt<numsubiter; cnt++) {
        ilocal=i-cnt*2;
        if (ilocal>=zone.is && ilocal<=zone.ie) {
          update_dUstar_emfield_SOR_istation(np, gl, flux, ilocal, zone, SOR_SWEEP_FORWARD,cnt);
        }
      }
    }
    for (i=zone.ie; i>=zone.is-(numsubiter-1)*2; i--){
#ifdef OPENMPTHREADS
#pragma omp parallel for private(cnt,ilocal) schedule(dynamic)
#endif
      for (cnt=0; cnt<numsubiter; cnt++) {
        ilocal=i+cnt*2;
        if (ilocal>=zone.is && ilocal<=zone.ie) {
          update_dUstar_emfield_SOR_istation(np, gl, flux, ilocal, zone, SOR_SWEEP_BACKWARD,cnt);
        }
      }
    }
  }
}
#endif //DISTMPI


/* Advance the emfield conserved variables over zone with the SOR pseudotime
   scheme: zero dUstar, relax it for each flux, then add it to U (without
   additional relaxation, since relaxEMF was already applied per node). */
void update_U_emfield_SOR(np_t *np, gl_t *gl, zone_t zone){
  long flux;
  sweep_with_1D_segments(np,gl,zone,&init_dUstar_emfield_SOR,SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_valid,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
  for (flux=0; flux<nfe; flux++){
    update_dUstar_emfield_SOR(np, gl, flux, zone);
  }
  sweep_with_1D_segments(np,gl,zone,&update_U_from_dUstar_emfield_without_relaxation,SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_inner,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
}
#endif


/* exact inversion with the bandwidth corresponding to ie-is */
/* Direct solve of the (i,j) plane at station k for one flux: assemble a
   banded system (lines ordered i-fastest, half-bandwidth hbw spanning the i
   extent) in xdma and invert it with EXM_solve_XDMA.  Corner boundary nodes
   without a usable boundary direction are tied to whichever diagonal inner
   neighbour exists; otherwise the row degenerates to identity. */
void update_dUstar_emfield_Newton_ij_1(np_t *np, gl_t *gl, long k, long flux, zone_t zone){
  long line,i,j,hbw,l;
  double C_ip1,C_im1,C_ip0,C_jp0,C_jp1,C_jm1,dtau;
  double *xdma;
  EXM_gl2D_t xdmagl;
  bool DIREC_FOUND;
  long theta,thetasgn;
  /* band storage: one row per node, last column holds the RHS */
  xdmagl.is=0;
  xdmagl.ie=(zone.ie-zone.is+1)*2+1+2;
  xdmagl.js=0;
  xdmagl.je=(zone.je-zone.js+1) *(zone.ie-zone.is+1)-1;
  hbw=(xdmagl.ie-xdmagl.is+1)/2-1;
  xdma=(double *)malloc((xdmagl.je-xdmagl.js+1)*(xdmagl.ie-xdmagl.is+1)*sizeof(double));
  /* set it up */
  /* init xdma */
  for (i=xdmagl.is; i<=xdmagl.ie; i++){
    for (j=xdmagl.js; j<=xdmagl.je; j++){
      xdma[EXM_ai2(xdmagl,i,j)]=0.0;
    }
  }
  line=0;
  for_2DL(j,zone.js,zone.je){
    for_1DL(i,zone.is,zone.ie){
      /* for inner node */
      l=_ai(gl,i,j,k);
      if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)) {
        if (is_node_inner(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)) {
          find_linearization_coefficients_inner_node_emfield(np, gl, l, 0, flux, &C_im1, &C_ip0, &C_ip1);
          find_linearization_coefficients_inner_node_emfield(np, gl, l, 1, flux, &C_jm1, &C_jp0, &C_jp1);
          find_dtau_emfield(np,gl,l,flux,&dtau);
          /* i+0,j+0 */
          xdma[EXM_ai2(xdmagl,hbw,line)]=C_ip0+C_jp0+1.0/dtau;
          /* i-1,j+0 */
          xdma[EXM_ai2(xdmagl,hbw-1,line)]=C_im1;
          /* i+1,j+0 */
          xdma[EXM_ai2(xdmagl,hbw+1,line)]=C_ip1;
          /* i+0,j-1 */
          xdma[EXM_ai2(xdmagl,hbw-hbw+1,line)]=C_jm1;
          /* i+0,j+1 */
          xdma[EXM_ai2(xdmagl,hbw+hbw-1,line)]=C_jp1;
          xdma[EXM_ai2(xdmagl,xdmagl.ie,line)]=np[_ai(gl,i,j,k)].bs->dUstaremfield[flux]/dtau;
        } else {
          xdma[EXM_ai2(xdmagl,hbw,line)]=1.0;
          DIREC_FOUND=find_bdry_direc(np, gl, l, TYPELEVEL_EMFIELD, &theta, &thetasgn);
          if (DIREC_FOUND && theta==0) {
            /* boundary along i: couple to the i+-1 neighbour */
            find_linearization_coefficients_bdry_node_emfield(np, gl, l, theta, thetasgn, flux, _node_type(np[_ai(gl,i,j,k)], TYPELEVEL_EMFIELD), &(xdma[EXM_ai2(xdmagl,hbw,line)]), &(xdma[EXM_ai2(xdmagl,hbw+thetasgn,line)]), &(xdma[EXM_ai2(xdmagl,xdmagl.ie,line)]));
          } else {
            if (DIREC_FOUND && theta==1) {
              /* boundary along j: couple to the j+-1 neighbour (one band out) */
              find_linearization_coefficients_bdry_node_emfield(np, gl, l, theta, thetasgn, flux, _node_type(np[_ai(gl,i,j,k)], TYPELEVEL_EMFIELD), &(xdma[EXM_ai2(xdmagl,hbw,line)]), &(xdma[EXM_ai2(xdmagl,hbw+thetasgn*(hbw-1),line)]), &(xdma[EXM_ai2(xdmagl,xdmagl.ie,line)]));
            } else {
              /* corner node: couple to the first inner diagonal neighbour found */
              if (is_node_inner(np[_ai(gl,i+1,j+1,k)], TYPELEVEL_EMFIELD)) {
                find_linearization_coefficients_bdry_node_emfield(np, gl, _ai(gl,i,j,k), 0, +0, flux, _node_type(np[_ai(gl,i,j,k)], TYPELEVEL_EMFIELD), &(xdma[EXM_ai2(xdmagl,hbw,line)]), &(xdma[EXM_ai2(xdmagl,hbw+hbw,line)]), &(xdma[EXM_ai2(xdmagl,xdmagl.ie,line)]));
              } else {
                if (is_node_inner(np[_ai(gl,i-1,j+1,k)], TYPELEVEL_EMFIELD)) {
                  find_linearization_coefficients_bdry_node_emfield(np, gl, _ai(gl,i,j,k), 0, +0, flux, _node_type(np[_ai(gl,i,j,k)], TYPELEVEL_EMFIELD), &(xdma[EXM_ai2(xdmagl,hbw,line)]), &(xdma[EXM_ai2(xdmagl,hbw+hbw-2,line)]), &(xdma[EXM_ai2(xdmagl,xdmagl.ie,line)]));
                } else {
                  if (is_node_inner(np[_ai(gl,i+1,j-1,k)], TYPELEVEL_EMFIELD)) {
                    find_linearization_coefficients_bdry_node_emfield(np, gl, _ai(gl,i,j,k), 0, +0, flux, _node_type(np[_ai(gl,i,j,k)], TYPELEVEL_EMFIELD), &(xdma[EXM_ai2(xdmagl,hbw,line)]), &(xdma[EXM_ai2(xdmagl,hbw-hbw+2,line)]), &(xdma[EXM_ai2(xdmagl,xdmagl.ie,line)]));
                  } else {
                    if (is_node_inner(np[_ai(gl,i-1,j-1,k)], TYPELEVEL_EMFIELD)) {
                      find_linearization_coefficients_bdry_node_emfield(np, gl, _ai(gl,i,j,k), 0, +0, flux, _node_type(np[_ai(gl,i,j,k)], TYPELEVEL_EMFIELD), &(xdma[EXM_ai2(xdmagl,hbw,line)]), &(xdma[EXM_ai2(xdmagl,hbw-hbw,line)]), &(xdma[EXM_ai2(xdmagl,xdmagl.ie,line)]));
                    } else {
                      //printf("Problem in update_dUstar_emfield_XDMA at node (%ld,%ld,%ld)\n",i,j,k);
                      xdma[EXM_ai2(xdmagl,hbw,line)]=1.0;
                    }
                  }
                }
              }
            }
          }
        }
      } else {
        /* invalid node: identity row so the solve stays well posed */
        xdma[EXM_ai2(xdmagl,hbw,line)]=1.0;
      }
      line++;
    }
  }
  EXM_solve_XDMA(xdma, xdmagl);
  line=0;
  /* update dUstar */
  for_2DL(j,zone.js,zone.je){
    for_1DL(i,zone.is,zone.ie){
      if (is_node_inner(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)){
        np[_ai(gl,i,j,k)].bs->dUstaremfield[flux]=xdma[EXM_ai2(xdmagl,xdmagl.ie,line)] /xdma[EXM_ai2(xdmagl,hbw,line)];
      }
      line++;
    }
  }
  free(xdma);
}


/* exact inversion with the bandwidth corresponding to je-js */
/* Same direct (i,j)-plane solve as update_dUstar_emfield_Newton_ij_1 but with
   the lines ordered j-fastest, so the half-bandwidth spans the j extent. */
void update_dUstar_emfield_Newton_ij_2(np_t *np, gl_t *gl, long k, long flux, zone_t zone){
  long line,i,j,hbw,l;
  double *xdma;
  EXM_gl2D_t xdmagl;
  double C_ip1,C_im1,C_ip0,C_jp0,C_jp1,C_jm1,dtau;
  bool DIREC_FOUND;
  long theta,thetasgn;
  xdmagl.is=0;
  xdmagl.ie=(zone.je-zone.js+1)*2+1+2;
  xdmagl.js=0;
  xdmagl.je=(zone.je-zone.js+1) *(zone.ie-zone.is+1)-1;
  hbw=(xdmagl.ie-xdmagl.is+1)/2-1;
  xdma=(double *)malloc((xdmagl.je-xdmagl.js+1)*(xdmagl.ie-xdmagl.is+1)*sizeof(double));
  /* set it up */
  /* init xdma */
  for (i=xdmagl.is; i<=xdmagl.ie; i++){
    for (j=xdmagl.js; j<=xdmagl.je; j++){
      xdma[EXM_ai2(xdmagl,i,j)]=0.0;
    }
  }
  line=0;
  for_1DL(i,zone.is,zone.ie){
    for_2DL(j,zone.js,zone.je){
      l=_ai(gl,i,j,k);
      if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)) {
        /* for inner node */
        if (is_node_inner(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)) {
          find_linearization_coefficients_inner_node_emfield(np, gl, l, 0, flux, &C_im1, &C_ip0, &C_ip1);
          find_linearization_coefficients_inner_node_emfield(np, gl, l, 1, flux, &C_jm1, &C_jp0, &C_jp1);
          find_dtau_emfield(np,gl,l,flux,&dtau);
          /* j+0,i+0 */
          xdma[EXM_ai2(xdmagl,hbw,line)]=(C_ip0)+(C_jp0)+1.0/dtau;
          /* j-1,i+0 */
          xdma[EXM_ai2(xdmagl,hbw-1,line)]=C_jm1;
          /* j+1,i+0 */
          xdma[EXM_ai2(xdmagl,hbw+1,line)]=C_jp1;
          /* j+0,i-1 */
          xdma[EXM_ai2(xdmagl,hbw-hbw+1,line)]=C_im1;
          /* j+0,i+1 */
          xdma[EXM_ai2(xdmagl,hbw+hbw-1,line)]=C_ip1;
          xdma[EXM_ai2(xdmagl,xdmagl.ie,line)]=np[_ai(gl,i,j,k)].bs->dUstaremfield[flux]/dtau;
        } else {
          xdma[EXM_ai2(xdmagl,hbw,line)]=1.0;
          DIREC_FOUND=find_bdry_direc(np, gl, l, TYPELEVEL_EMFIELD, &theta, &thetasgn);
          if (DIREC_FOUND && theta==0) {
            /* boundary along i: the i+-1 neighbour is one band out here */
            find_linearization_coefficients_bdry_node_emfield(np, gl, _ai(gl,i,j,k), theta, thetasgn, flux, _node_type(np[_ai(gl,i,j,k)], TYPELEVEL_EMFIELD), &(xdma[EXM_ai2(xdmagl,hbw,line)]), &(xdma[EXM_ai2(xdmagl,hbw+thetasgn*(hbw-1),line)]), &(xdma[EXM_ai2(xdmagl,xdmagl.ie,line)]) );
          } else {
            if (DIREC_FOUND && theta==1) {
              /* boundary along j: adjacent band */
              find_linearization_coefficients_bdry_node_emfield(np, gl, _ai(gl,i,j,k), theta, thetasgn, flux, _node_type(np[_ai(gl,i,j,k)], TYPELEVEL_EMFIELD), &(xdma[EXM_ai2(xdmagl,hbw,line)]), &(xdma[EXM_ai2(xdmagl,hbw+thetasgn,line)]), &(xdma[EXM_ai2(xdmagl,xdmagl.ie,line)]) );
            } else {
              /* corner node: couple to the first inner diagonal neighbour found */
              if (is_node_inner(np[_ai(gl,i+1,j+1,k)], TYPELEVEL_EMFIELD)) {
                find_linearization_coefficients_bdry_node_emfield(np, gl, _ai(gl,i,j,k), 0, +0, flux, _node_type(np[_ai(gl,i,j,k)], TYPELEVEL_EMFIELD), &(xdma[EXM_ai2(xdmagl,hbw,line)]), &(xdma[EXM_ai2(xdmagl,hbw+hbw,line)]), &(xdma[EXM_ai2(xdmagl,xdmagl.ie,line)]) );
              } else {
                if (is_node_inner(np[_ai(gl,i-1,j+1,k)], TYPELEVEL_EMFIELD)) {
                  find_linearization_coefficients_bdry_node_emfield(np, gl, _ai(gl,i,j,k), 0, +0, flux, _node_type(np[_ai(gl,i,j,k)], TYPELEVEL_EMFIELD), &(xdma[EXM_ai2(xdmagl,hbw,line)]), &(xdma[EXM_ai2(xdmagl,hbw-hbw+2,line)]), &(xdma[EXM_ai2(xdmagl,xdmagl.ie,line)]) );
                } else {
                  if (is_node_inner(np[_ai(gl,i+1,j-1,k)], TYPELEVEL_EMFIELD)) {
                    find_linearization_coefficients_bdry_node_emfield(np, gl, _ai(gl,i,j,k), 0, +0, flux, _node_type(np[_ai(gl,i,j,k)], TYPELEVEL_EMFIELD), &(xdma[EXM_ai2(xdmagl,hbw,line)]), &(xdma[EXM_ai2(xdmagl,hbw+hbw-2,line)]), &(xdma[EXM_ai2(xdmagl,xdmagl.ie,line)]) );
                  } else {
                    if (is_node_inner(np[_ai(gl,i-1,j-1,k)], TYPELEVEL_EMFIELD)) {
                      find_linearization_coefficients_bdry_node_emfield(np, gl, _ai(gl,i,j,k), 0, +0, flux, _node_type(np[_ai(gl,i,j,k)], TYPELEVEL_EMFIELD), &(xdma[EXM_ai2(xdmagl,hbw,line)]), &(xdma[EXM_ai2(xdmagl,hbw-hbw,line)]), &(xdma[EXM_ai2(xdmagl,xdmagl.ie,line)]) );
                    } else {
                      //printf("Problem in EEF_UpdateQtildeXDMAj at node (%ld,%ld,%ld)\n",i,j,k);
                      xdma[EXM_ai2(xdmagl,hbw,line)]=1.0;
                    }
                  }
                }
              }
            }
          }
        }
      } else {
        /* invalid node: identity row so the solve stays well posed */
        xdma[EXM_ai2(xdmagl,hbw,line)]=1.0;
      }
      line++;
    }
  }
  EXM_solve_XDMA(xdma, xdmagl);
  line=0;
  /* update dU */
  for_1DL(i,zone.is,zone.ie){
    for_2DL(j,zone.js,zone.je){
      if (is_node_inner(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)){
        np[_ai(gl,i,j,k)].bs->dUstaremfield[flux]=xdma[EXM_ai2(xdmagl,xdmagl.ie,line)] /xdma[EXM_ai2(xdmagl,hbw,line)];
      }
      line++;
    }
  }
  free(xdma);
}


/* Direct (i,j)-plane solve at station k: pick the line ordering that gives
   the smaller bandwidth.  Requires the full i and j extents on this process
   (not split with MPI along i or j). */
void update_dUstar_emfield_Newton_ij(np_t *np, gl_t *gl, long k, long flux, zone_t zone){
  /* first check if zone is valid */
  if (zone.js!=gl->domain_all.js || zone.je!=gl->domain_all.je || zone.is!=gl->domain_all.is || zone.ie!=gl->domain_all.ie) {
    fatal_error("The tsemf time stepping Newton_ij can not be used if the domain is split along i or j with MPI.");
  }
  if (zone.je-zone.js<zone.ie-zone.is) {
    update_dUstar_emfield_Newton_ij_2(np, gl, k, flux, zone);
  } else {
    update_dUstar_emfield_Newton_ij_1(np, gl, k, flux, zone);
  }
}


/* Direct (j,k)-plane solve at station i for one flux: banded system with
   lines ordered k-fastest (half-bandwidth spans the k extent), solved with
   EXM_solve_XDMA.  A boundary node whose boundary direction is i (theta==0)
   cannot be represented in this plane and degenerates to an identity row.
   Requires the full j and k extents on this process. */
void update_dUstar_emfield_Newton_jk(np_t *np, gl_t *gl, long i, long flux, zone_t zone){
  long line,j,k,hbw,l;
  double C_kp1,C_km1,C_kp0,C_jp0,C_jp1,C_jm1,dtau;
  double *xdma;
  EXM_gl2D_t xdmagl;
  bool DIREC_FOUND;
  long theta,thetasgn;
  /* first check if zone is valid */
  if (zone.js!=gl->domain_all.js || zone.je!=gl->domain_all.je || zone.ks!=gl->domain_all.ks || zone.ke!=gl->domain_all.ke) {
    fatal_error("The tsemf time stepping Newton_jk can not be used if the domain is split along j or k with MPI.");
  }
  xdmagl.is=0;
  xdmagl.ie=(zone.ke-zone.ks+1)*2+1+2;
  xdmagl.js=0;
  xdmagl.je=(zone.je-zone.js+1) *(zone.ke-zone.ks+1)-1;
  hbw=(xdmagl.ie-xdmagl.is+1)/2-1;
  xdma=(double *)malloc((xdmagl.je-xdmagl.js+1)*(xdmagl.ie-xdmagl.is+1)*sizeof(double));
  /* set it up */
  /* init xdma */
  for (k=xdmagl.is; k<=xdmagl.ie; k++){
    for (j=xdmagl.js; j<=xdmagl.je; j++){
      xdma[EXM_ai2(xdmagl,k,j)]=0.0;
    }
  }
  line=0;
  for_2DL(j,zone.js,zone.je){
    for_3DL(k,zone.ks,zone.ke){
      /* for inner node */
      l=_ai(gl,i,j,k);
      if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)) {
        if (is_node_inner(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)) {
          find_linearization_coefficients_inner_node_emfield(np, gl, l, 2, flux, &C_km1, &C_kp0, &C_kp1);
          find_linearization_coefficients_inner_node_emfield(np, gl, l, 1, flux, &C_jm1, &C_jp0, &C_jp1);
          find_dtau_emfield(np,gl,l,flux,&dtau);
          /* k+0,j+0 */
          xdma[EXM_ai2(xdmagl,hbw,line)]=C_kp0+C_jp0+1.0/dtau;
          /* k-1,j+0 */
          xdma[EXM_ai2(xdmagl,hbw-1,line)]=C_km1;
          /* k+1,j+0 */
          xdma[EXM_ai2(xdmagl,hbw+1,line)]=C_kp1;
          /* k+0,j-1 */
          xdma[EXM_ai2(xdmagl,hbw-hbw+1,line)]=C_jm1;
          /* k+0,j+1 */
          xdma[EXM_ai2(xdmagl,hbw+hbw-1,line)]=C_jp1;
          assert(dtau!=0.0);
          xdma[EXM_ai2(xdmagl,xdmagl.ie,line)]=np[_ai(gl,i,j,k)].bs->dUstaremfield[flux]/dtau;
        } else {
          xdma[EXM_ai2(xdmagl,hbw,line)]=1.0;
          DIREC_FOUND=find_bdry_direc(np, gl, l, TYPELEVEL_EMFIELD, &theta, &thetasgn);
          if (DIREC_FOUND && theta==2) {
            find_linearization_coefficients_bdry_node_emfield(np, gl, l, theta, thetasgn, flux, _node_type(np[_ai(gl,i,j,k)], TYPELEVEL_EMFIELD), &(xdma[EXM_ai2(xdmagl,hbw,line)]), &(xdma[EXM_ai2(xdmagl,hbw+thetasgn,line)]), &(xdma[EXM_ai2(xdmagl,xdmagl.ie,line)]));
          } else {
            if (DIREC_FOUND && theta==0) {
              /* boundary along i is out of this (j,k) plane: identity row */
              xdma[EXM_ai2(xdmagl,hbw,line)]=1.0;
            } else {
              if (DIREC_FOUND && theta==1) {
                find_linearization_coefficients_bdry_node_emfield(np, gl, l, theta, thetasgn, flux, _node_type(np[_ai(gl,i,j,k)], TYPELEVEL_EMFIELD), &(xdma[EXM_ai2(xdmagl,hbw,line)]), &(xdma[EXM_ai2(xdmagl,hbw+thetasgn*(hbw-1),line)]), &(xdma[EXM_ai2(xdmagl,xdmagl.ie,line)]));
              } else {
                /* corner node: couple to the first inner diagonal neighbour found */
                if (is_node_inner(np[_ai(gl,i,j+1,k+1)], TYPELEVEL_EMFIELD)) {
                  find_linearization_coefficients_bdry_node_emfield(np, gl, _ai(gl,i,j,k), 2, +0, flux, _node_type(np[_ai(gl,i,j,k)], TYPELEVEL_EMFIELD), &(xdma[EXM_ai2(xdmagl,hbw,line)]), &(xdma[EXM_ai2(xdmagl,hbw+hbw,line)]), &(xdma[EXM_ai2(xdmagl,xdmagl.ie,line)]));
                } else {
                  if (is_node_inner(np[_ai(gl,i,j+1,k-1)], TYPELEVEL_EMFIELD)) {
                    find_linearization_coefficients_bdry_node_emfield(np, gl, _ai(gl,i,j,k), 2, +0, flux, _node_type(np[_ai(gl,i,j,k)], TYPELEVEL_EMFIELD), &(xdma[EXM_ai2(xdmagl,hbw,line)]), &(xdma[EXM_ai2(xdmagl,hbw+hbw-2,line)]), &(xdma[EXM_ai2(xdmagl,xdmagl.ie,line)]));
                  } else {
                    if (is_node_inner(np[_ai(gl,i,j-1,k+1)], TYPELEVEL_EMFIELD)) {
                      find_linearization_coefficients_bdry_node_emfield(np, gl, _ai(gl,i,j,k), 2, +0, flux, _node_type(np[_ai(gl,i,j,k)], TYPELEVEL_EMFIELD), &(xdma[EXM_ai2(xdmagl,hbw,line)]), &(xdma[EXM_ai2(xdmagl,hbw-hbw+2,line)]), &(xdma[EXM_ai2(xdmagl,xdmagl.ie,line)]));
                    } else {
                      if (is_node_inner(np[_ai(gl,i,j-1,k-1)], TYPELEVEL_EMFIELD)) {
                        find_linearization_coefficients_bdry_node_emfield(np, gl, _ai(gl,i,j,k), 2, +0, flux, _node_type(np[_ai(gl,i,j,k)], TYPELEVEL_EMFIELD), &(xdma[EXM_ai2(xdmagl,hbw,line)]), &(xdma[EXM_ai2(xdmagl,hbw-hbw,line)]), &(xdma[EXM_ai2(xdmagl,xdmagl.ie,line)]));
                      } else {
                        //printf("Problem in update_dUstar_emfield_XDMA at node (%ld,%ld,%ld)\n",i,j,k);
                        xdma[EXM_ai2(xdmagl,hbw,line)]=1.0;
                      }
                    }
                  }
                }
              }
            }
          }
        }
      } else {
        xdma[EXM_ai2(xdmagl,hbw,line)]=1.0;
      }
      line++;
    }
  }
  EXM_solve_XDMA(xdma, xdmagl);
  line=0;
  /* update dUstar */
  for_2DL(j,zone.js,zone.je){
    for_3DL(k,zone.ks,zone.ke){
      if (is_node_inner(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)){
        assert(xdma[EXM_ai2(xdmagl,hbw,line)]!=0.0);
        np[_ai(gl,i,j,k)].bs->dUstaremfield[flux]=xdma[EXM_ai2(xdmagl,xdmagl.ie,line)] /xdma[EXM_ai2(xdmagl,hbw,line)];
      }
      line++;
    }
  }
  free(xdma);
}


/* ADIi pseudotime scheme (3D only): direct (j,k)-plane solves at each i
   station (parallelized over i), followed by the factored ADI update and the
   addition of dUstar to U. */
void update_U_emfield_ADIi(np_t *np, gl_t *gl, zone_t zone){
  long flux,i;
  sweep_with_1D_segments(np,gl,zone,&init_dUstar_emfield_ADI,SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_inner,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
#ifndef _3D
  fatal_error("The tsemf pseudotime method TSEMF_ADIi can not be used in 2D or 1D.\n");
#endif
#ifdef OPENMPTHREADS
#pragma omp parallel for private(i,flux) schedule(dynamic)
#endif
  for (i=zone.is; i<=zone.ie; i++) {
    for (flux=0; flux<nfe; flux++) {
      update_dUstar_emfield_Newton_jk(np, gl, i, flux, zone);
    }
  }
  sweep_with_1D_segments(np,gl,zone,&update_dUstar_emfield_ADI,SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_inner,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
  sweep_with_1D_segments(np,gl,zone,&update_U_from_dUstar_emfield,SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_inner,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
}


/* ADIk pseudotime scheme: direct (i,j)-plane solves at each k station
   (parallelized over k in 3D; single plane in 2D), followed by the factored
   ADI update along k and the addition of dUstar to U. */
void update_U_emfield_ADIk(np_t *np, gl_t *gl, zone_t zone){
  long flux;
  sweep_with_1D_segments(np,gl,zone,&init_dUstar_emfield_ADI,SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_inner,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
#ifdef _3D
  long k;
#ifdef OPENMPTHREADS
#pragma omp parallel for private(k,flux) schedule(dynamic)
#endif
  for (k=zone.ks; k<=zone.ke; k++) {
    for (flux=0; flux<nfe; flux++) {
      update_dUstar_emfield_Newton_ij(np, gl, k, flux, zone);
    }
  }
#else
  for (flux=0; flux<nfe; flux++) {
    update_dUstar_emfield_Newton_ij(np, gl, 1, flux, zone);
  }
#endif
  sweep_with_1D_segments(np,gl,zone,&update_dUstar_emfield_ADI,SWEEPTYPE_K, TYPELEVEL_EMFIELD,&is_node_inner,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
  sweep_with_1D_segments(np,gl,zone,&update_U_from_dUstar_emfield,SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_inner,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
}


/* Seed dUstaremfield with the explicit pseudotime step -Res*dtau for every
   flux at each inner node of the 1D segment ls..le along dimension theta. */
void init_dUstar_emfield_Newton(np_t *np, gl_t *gl, long theta, long ls, long le){
  long l,flux;
  double dtau;
  for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){
    assert_np(np[l],is_node_inner(np[l],TYPELEVEL_EMFIELD));
    for (flux=0; flux<nfe; flux++) {
      find_dtau_emfield(np,gl,l,flux,&dtau);
      np[l].bs->dUstaremfield[flux]=-np[l].bs->Resemfield[flux]*dtau;
    }
  }
}


/* Newton pseudotime scheme (2D only): explicit seed of dUstar, one direct
   (i,j)-plane solve per flux, then add dUstar to U. */
void update_U_emfield_Newton(np_t *np, gl_t *gl, zone_t zone){
  long flux;
#ifdef _3D
  fatal_error("Newton method can not be used in 3D.");
#endif
  sweep_with_1D_segments(np,gl,zone,&init_dUstar_emfield_Newton,SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_inner,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
  for (flux=0; flux<nfe; flux++){
    update_dUstar_emfield_Newton_ij(np, gl, 1, flux, zone);
  }
  sweep_with_1D_segments(np,gl,zone,&update_U_from_dUstar_emfield,SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_inner,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
}
#endif
GB_unaryop__lnot_int64_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_int64_uint16
// op(A') function:  GB_tran__lnot_int64_uint16

// C type:   int64_t
// A type:   uint16_t
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = !(aij != 0)

// Type of the A matrix entries read by this kernel.
#define GB_ATYPE \
    uint16_t

// Type of the C matrix entries written by this kernel.
#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: logical NOT (nonzero -> 0, zero -> 1)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting: typecast the A entry to the C type before applying the operator
#define GB_CASTING(z, x) \
    int64_t z = (int64_t) x ;

// cij = op (cast (aij)) : read, typecast, then apply the unary op
#define GB_CAST_OP(pC,pA)  \
{                                       \
    /* aij = Ax [pA] */                 \
    GB_GETA (aij, Ax, pA) ;             \
    /* Cx [pC] = op (cast (aij)) */     \
    GB_CASTING (x, aij) ;               \
    GB_OP (GB_CX (pC), x) ;             \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT64 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = !(aij != 0) elementwise over all anz entries, parallelized
// with a static OpenMP schedule across nthreads threads.
GrB_Info GB_unop__lnot_int64_uint16
(
    int64_t *restrict Cx,         // output values, anz entries
    const uint16_t *restrict Ax,  // input values, anz entries
    int64_t anz,                  // number of entries to process
    int nthreads                  // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose worker body is supplied by GB_unaryop_transpose.c, which is
// specialized via the GB_* macros defined above (phase 2 of 2).
GrB_Info GB_tran__lnot_int64_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,                 // Rowcounts [naslice]
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
TemporalMaxPooling.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/TemporalMaxPooling.c"
#else

/* Validate shapes and hyper-parameters for temporal max pooling.
 *
 * input       : 2D (seq, feature) or 3D (batch, seq, feature) tensor
 * gradOutput  : if non-NULL, must match the pooled output shape
 * indices     : if non-NULL, must match the pooled output shape
 * kW          : pooling window size (must be > 0)
 * dW          : pooling stride (must be > 0)
 *
 * The pooled sequence length is noframe = (niframe - kW) / dW + 1.
 */
static inline void THNN_(TemporalMaxPooling_shapeCheck)(
                         THNNState *state,
                         THTensor *input,
                         THTensor *gradOutput,
                         THIndexTensor *indices,
                         int kW,
                         int dW) {
  int64_t niframe;
  int64_t framesize;
  int64_t noframe;

  int dimS = 0; // sequence dimension
  int dimF = 1; // feature dimension
  int ndims = input->nDimension;

  if (input->nDimension == 3)
  {
    /* batch mode: dimension 0 is the batch */
    dimS = 1;
    dimF = 2;
  }

  niframe = input->size[dimS];
  framesize = input->size[dimF];
  noframe = (niframe - kW) / dW + 1;

  THArgCheck(kW > 0, 5,
             "kernel size should be greater than zero, but got kW: %d", kW);
  THArgCheck(dW > 0, 6,
             "stride should be greater than zero, but got dW: %d", dW);

  THNN_ARGCHECK(input->nDimension == 2 || input->nDimension == 3, 2, input,
                "2D or 3D (batch mode) tensor expected for input, but got: %s");
  THArgCheck(input->size[dimS] >= kW, 2,
             "input sequence smaller than kernel size. Got: %d, Expected: %d",
             input->size[dimS], kW);

  if (gradOutput != NULL) {
    THNN_CHECK_DIM_SIZE(gradOutput, ndims, dimS, noframe);
    THNN_CHECK_DIM_SIZE(gradOutput, ndims, dimF, framesize); /* fix: was missing ';' */
  }
  if (indices != NULL) {
    THNN_CHECK_DIM_SIZE_INDICES(indices, ndims, dimS, noframe);
    THNN_CHECK_DIM_SIZE_INDICES(indices, ndims, dimF, framesize);
  }
}

/* Forward pass: for each output frame, take the per-feature maximum over a
 * window of kW input frames advanced by stride dW.  Also records, in
 * `indices`, the window-relative offset of each maximum (used by the
 * backward pass).  Resizes `output` and `indices` as needed. */
void THNN_(TemporalMaxPooling_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          THIndexTensor *indices,
          int kW,
          int dW)
{
  int64_t niframe;
  int64_t framesize;
  int64_t noframe;

  real *input_data;
  real *output_data;
  THIndex_t *indices_data;

  int64_t t, y;

  int dimS = 0; // sequence dimension
  int dimF = 1; // feature dimension

  THNN_(TemporalMaxPooling_shapeCheck)(state, input, NULL, NULL, kW, dW);

  if (input->nDimension == 3)
  {
    dimS = 1;
    dimF = 2;
  }

  /* sizes */
  niframe = input->size[dimS];
  framesize = input->size[dimF];
  noframe = (niframe - kW) / dW + 1;

  /* get contiguous input (may allocate a copy; freed below) */
  input = THTensor_(newContiguous)(input);

  if (input->nDimension == 2)
  {
    /* resize output */
    THTensor_(resize2d)(output, noframe, framesize);

    /* indices will contain index locations for each output point */
    THIndexTensor_(resize2d)(indices, noframe, framesize);

    /* get raw pointers */
    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);
    indices_data = THIndexTensor_(data)(indices);

    for(t = 0; t < noframe; t++)
    {
      real *ip = input_data + t*framesize*dW;
      real *op = output_data + t*framesize;
      THIndex_t *xp = indices_data + t*framesize;
#pragma omp parallel for private(y)
      for(y = 0; y < framesize; y++)
      {
        /* compute local max over the kW-frame window for feature y */
        int64_t maxindex = -1;
        real maxval = -THInf;
        int64_t x;
        for(x = 0; x < kW; x++)
        {
          real val = ip[x*framesize+y];
          if (val > maxval)
          {
            maxval = val;
            maxindex = x;
          }
        }

        /* set output to local max; store the argmax offset directly as an
           index (fix: previously round-tripped through `real`) */
        op[y] = maxval;
        xp[y] = (THIndex_t)maxindex;
      }
    }
  }
  else
  {
    /* number of batch frames */
    int64_t nbframe = input->size[0];
    int64_t i;

    /* resize output */
    THTensor_(resize3d)(output, nbframe, noframe, framesize);

    /* indices will contain index locations for each output point */
    THIndexTensor_(resize3d)(indices, nbframe, noframe, framesize);

    /* get raw pointers */
    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);
    indices_data = THIndexTensor_(data)(indices);

    for(i = 0; i < nbframe; i++)
    {
      real *inputSample_data = input_data + i*niframe*framesize;
      real *outputSample_data = output_data + i*noframe*framesize;
      THIndex_t *indicesSample_data = indices_data + i*noframe*framesize;

      for(t = 0; t < noframe; t++)
      {
        real *ip = inputSample_data + t*framesize*dW;
        real *op = outputSample_data + t*framesize;
        THIndex_t *xp = indicesSample_data + t*framesize;

#pragma omp parallel for private(y)
        for(y = 0; y < framesize; y++)
        {
          /* compute local max over the kW-frame window for feature y */
          int64_t maxindex = -1;
          real maxval = -THInf;
          int64_t x;
          for(x = 0; x < kW; x++)
          {
            real val = ip[x*framesize+y];
            if (val > maxval)
            {
              maxval = val;
              maxindex = x;
            }
          }

          /* set output to local max; store the argmax offset directly */
          op[y] = maxval;
          xp[y] = (THIndex_t)maxindex;
        }
      }
    }
  }

  /* cleanup: release the contiguous copy (or the extra reference) */
  THTensor_(free)(input);
}

/* Backward pass: scatter each output gradient back to the input position
 * that produced the maximum, as recorded in `indices`.  `gradInput` is
 * resized to match `input` and zero-initialized before accumulation. */
void THNN_(TemporalMaxPooling_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          THIndexTensor *indices,
          int kW,
          int dW)
{
  int64_t niframe;
  int64_t noframe;  /* fix: was `int`, inconsistent with the other 64-bit sizes */
  int64_t framesize;

  real *gradInput_data;
  real *gradOutput_data;
  THIndex_t *indices_data;

  int64_t t, y;

  THNN_(TemporalMaxPooling_shapeCheck)(state, input, gradOutput, indices, kW, dW);

  /* get contiguous gradOutput (may allocate a copy; freed below) */
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* resize and zero */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  int dimS = 0; // sequence dimension
  int dimF = 1; // feature dimension

  if (input->nDimension == 3)
  {
    dimS = 1;
    dimF = 2;
  }
  /* sizes */
  niframe = input->size[dimS];
  noframe = gradOutput->size[dimS];
  framesize = gradOutput->size[dimF];

  /* get raw pointers */
  gradInput_data = THTensor_(data)(gradInput);
  gradOutput_data = THTensor_(data)(gradOutput);
  indices_data = THIndexTensor_(data)(indices);

  if (input->nDimension == 2)
  {
    for(t = 0; t < noframe; t++)
    {
      real *gip = gradInput_data + t*framesize*dW;
      real *gop = gradOutput_data + t*framesize;
      THIndex_t *xp = indices_data + t*framesize;
#pragma omp parallel for private(y)
      for(y = 0; y < framesize; y++)
      {
        /* scatter the gradient to the recorded argmax position */
        int64_t maxindex = (int64_t)xp[y];
        if (maxindex != -1)
          gip[maxindex*framesize+y] += gop[y];
      }
    }
  }
  else
  {
    /* number of batch frames */
    int64_t nbframe = input->size[0];
    int64_t i;

    for(i = 0; i < nbframe; i++)
    {
      real *gradInputSample_data = gradInput_data + i*niframe*framesize;
      real *gradOutputSample_data = gradOutput_data + i*noframe*framesize;
      THIndex_t *indicesSample_data = indices_data + i*noframe*framesize;

      for(t = 0; t < noframe; t++)
      {
        real *gip = gradInputSample_data + t*framesize*dW;
        real *gop = gradOutputSample_data + t*framesize;
        THIndex_t *xp = indicesSample_data + t*framesize;
#pragma omp parallel for private(y)
        for(y = 0; y < framesize; y++)
        {
          /* scatter the gradient to the recorded argmax position */
          int64_t maxindex = (int64_t)xp[y];
          if (maxindex != -1)
            gip[maxindex*framesize+y] += gop[y];
        }
      }
    }
  }

  /* cleanup: release the contiguous copy (or the extra reference) */
  THTensor_(free)(gradOutput);
}

#endif
transform.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M % % T R R A A NN N SS F O O R R MM MM % % T RRRR AAAAA N N N SSS FFF O O RRRR M M M % % T R R A A N NN SS F O O R R M M % % T R R A A N N SSSSS F OOO R R M M % % % % % % MagickCore Image Transform Methods % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/attribute.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/draw.h" #include "magick/effect.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/image.h" #include "magick/memory_.h" #include "magick/layer.h" #include "magick/list.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/pixel-private.h" #include "magick/resource_.h" #include "magick/resize.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/transform.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C h o p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ChopImage() removes a region of an image and collapses the image to occupy % the removed portion. % % The format of the ChopImage method is: % % Image *ChopImage(const Image *image,const RectangleInfo *chop_info) % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o chop_info: Define the region of the image to chop. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info, ExceptionInfo *exception) { #define ChopImageTag "Chop/Image" CacheView *chop_view, *image_view; Image *chop_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo extent; ssize_t y; /* Check chop geometry. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); assert(chop_info != (RectangleInfo *) NULL); if (((chop_info->x+(ssize_t) chop_info->width) < 0) || ((chop_info->y+(ssize_t) chop_info->height) < 0) || (chop_info->x > (ssize_t) image->columns) || (chop_info->y > (ssize_t) image->rows)) ThrowImageException(OptionWarning,"GeometryDoesNotContainImage"); extent=(*chop_info); if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns) extent.width=(size_t) ((ssize_t) image->columns-extent.x); if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows) extent.height=(size_t) ((ssize_t) image->rows-extent.y); if (extent.x < 0) { extent.width-=(size_t) (-extent.x); extent.x=0; } if (extent.y < 0) { extent.height-=(size_t) (-extent.y); extent.y=0; } chop_image=CloneImage(image,image->columns-extent.width,image->rows- extent.height,MagickTrue,exception); if (chop_image == (Image *) NULL) return((Image *) NULL); /* Extract chop image. 
*/ status=MagickTrue; progress=0; image_view=AcquireCacheView(image); chop_view=AcquireCacheView(chop_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1) #endif for (y=0; y < (ssize_t) extent.y; y++) { register const PixelPacket *restrict p; register IndexPacket *restrict chop_indexes, *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width))) { *q=(*p); if (indexes != (IndexPacket *) NULL) { if (chop_indexes != (IndexPacket *) NULL) *chop_indexes++=GetPixelIndex(indexes+x); } q++; } p++; } if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ChopImage) #endif proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } /* Extract chop image. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1) #endif for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++) { register const PixelPacket *restrict p; register IndexPacket *restrict chop_indexes, *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y, image->columns,1,exception); q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns, 1,exception); if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width))) { *q=(*p); if (indexes != (IndexPacket *) NULL) { if (chop_indexes != (IndexPacket *) NULL) *chop_indexes++=GetPixelIndex(indexes+x); } q++; } p++; } if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ChopImage) #endif proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } chop_view=DestroyCacheView(chop_view); image_view=DestroyCacheView(image_view); chop_image->type=image->type; return(chop_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n s o l i d a t e C M Y K I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConsolidateCMYKImage() consolidates separate C, M, Y, and K planes into a % single image. 
% % The format of the ConsolidateCMYKImage method is: % % Image *ConsolidateCMYKImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image sequence. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ConsolidateCMYKImages(const Image *images, ExceptionInfo *exception) { CacheView *cmyk_view, *image_view; Image *cmyk_image, *cmyk_images; register ssize_t i; ssize_t y; /* Consolidate separate C, M, Y, and K planes into a single image. */ assert(images != (Image *) NULL); assert(images->signature == MagickSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); cmyk_images=NewImageList(); for (i=0; i < (ssize_t) GetImageListLength(images); i+=4) { cmyk_image=CloneImage(images,images->columns,images->rows,MagickTrue, exception); if (cmyk_image == (Image *) NULL) break; if (SetImageStorageClass(cmyk_image,DirectClass) == MagickFalse) break; (void) SetImageColorspace(cmyk_image,CMYKColorspace); image_view=AcquireCacheView(images); cmyk_view=AcquireCacheView(cmyk_image); for (y=0; y < (ssize_t) images->rows; y++) { register const PixelPacket *restrict p; register ssize_t x; register PixelPacket *restrict q; p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception); q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; for (x=0; x < (ssize_t) images->columns; x++) { SetPixelRed(q,QuantumRange-PixelIntensityToQuantum(p)); p++; q++; } if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse) break; } cmyk_view=DestroyCacheView(cmyk_view); image_view=DestroyCacheView(image_view); images=GetNextImageInList(images); if (images == (Image *) NULL) break; image_view=AcquireCacheView(images); 
cmyk_view=AcquireCacheView(cmyk_image); for (y=0; y < (ssize_t) images->rows; y++) { register const PixelPacket *restrict p; register ssize_t x; register PixelPacket *restrict q; p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception); q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; for (x=0; x < (ssize_t) images->columns; x++) { q->green=(Quantum) (QuantumRange-PixelIntensityToQuantum(p)); p++; q++; } if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse) break; } cmyk_view=DestroyCacheView(cmyk_view); image_view=DestroyCacheView(image_view); images=GetNextImageInList(images); if (images == (Image *) NULL) break; image_view=AcquireCacheView(images); cmyk_view=AcquireCacheView(cmyk_image); for (y=0; y < (ssize_t) images->rows; y++) { register const PixelPacket *restrict p; register ssize_t x; register PixelPacket *restrict q; p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception); q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; for (x=0; x < (ssize_t) images->columns; x++) { q->blue=(Quantum) (QuantumRange-PixelIntensityToQuantum(p)); p++; q++; } if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse) break; } cmyk_view=DestroyCacheView(cmyk_view); image_view=DestroyCacheView(image_view); images=GetNextImageInList(images); if (images == (Image *) NULL) break; image_view=AcquireCacheView(images); cmyk_view=AcquireCacheView(cmyk_image); for (y=0; y < (ssize_t) images->rows; y++) { register const PixelPacket *restrict p; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception); q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1, exception); if ((p == (const PixelPacket *) 
NULL) || (q == (PixelPacket *) NULL)) break; indexes=GetCacheViewAuthenticIndexQueue(cmyk_view); for (x=0; x < (ssize_t) images->columns; x++) { SetPixelIndex(indexes+x,QuantumRange- PixelIntensityToQuantum(p)); p++; } if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse) break; } cmyk_view=DestroyCacheView(cmyk_view); image_view=DestroyCacheView(image_view); AppendImageToList(&cmyk_images,cmyk_image); images=GetNextImageInList(images); if (images == (Image *) NULL) break; } return(cmyk_images); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C r o p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CropImage() extracts a region of the image starting at the offset defined % by geometry. Region must be fully defined, and no special handling of % geometry flags is performed. % % The format of the CropImage method is: % % Image *CropImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to crop with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry, ExceptionInfo *exception) { #define CropImageTag "Crop/Image" CacheView *crop_view, *image_view; Image *crop_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo bounding_box, page; ssize_t y; /* Check crop geometry. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); bounding_box=image->page; if ((bounding_box.width == 0) || (bounding_box.height == 0)) { bounding_box.width=image->columns; bounding_box.height=image->rows; } page=(*geometry); if (page.width == 0) page.width=bounding_box.width; if (page.height == 0) page.height=bounding_box.height; if (((bounding_box.x-page.x) >= (ssize_t) page.width) || ((bounding_box.y-page.y) >= (ssize_t) page.height) || ((page.x-bounding_box.x) > (ssize_t) image->columns) || ((page.y-bounding_box.y) > (ssize_t) image->rows)) { /* Crop is not within virtual canvas, return 1 pixel transparent image. */ (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); crop_image=CloneImage(image,1,1,MagickTrue,exception); if (crop_image == (Image *) NULL) return((Image *) NULL); crop_image->background_color.opacity=(Quantum) TransparentOpacity; (void) SetImageBackgroundColor(crop_image); crop_image->page=bounding_box; crop_image->page.x=(-1); crop_image->page.y=(-1); if (crop_image->dispose == BackgroundDispose) crop_image->dispose=NoneDispose; return(crop_image); } if ((page.x < 0) && (bounding_box.x >= 0)) { page.width+=page.x-bounding_box.x; page.x=0; } else { page.width-=bounding_box.x-page.x; page.x-=bounding_box.x; if (page.x < 0) page.x=0; } if ((page.y < 0) && (bounding_box.y >= 0)) { page.height+=page.y-bounding_box.y; page.y=0; } else { page.height-=bounding_box.y-page.y; page.y-=bounding_box.y; if (page.y < 0) page.y=0; } if ((size_t) (page.x+page.width) > image->columns) page.width=image->columns-page.x; if ((geometry->width != 0) && (page.width > geometry->width)) page.width=geometry->width; 
if ((size_t) (page.y+page.height) > image->rows) page.height=image->rows-page.y; if ((geometry->height != 0) && (page.height > geometry->height)) page.height=geometry->height; bounding_box.x+=page.x; bounding_box.y+=page.y; if ((page.width == 0) || (page.height == 0)) { (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); return((Image *) NULL); } /* Initialize crop image attributes. */ crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception); if (crop_image == (Image *) NULL) return((Image *) NULL); crop_image->page.width=image->page.width; crop_image->page.height=image->page.height; if (((ssize_t) (bounding_box.x+bounding_box.width) > (ssize_t) image->page.width) || ((ssize_t) (bounding_box.y+bounding_box.height) > (ssize_t) image->page.height)) { crop_image->page.width=bounding_box.width; crop_image->page.height=bounding_box.height; } crop_image->page.x=bounding_box.x; crop_image->page.y=bounding_box.y; /* Crop image. 
*/ status=MagickTrue; progress=0; image_view=AcquireCacheView(image); crop_view=AcquireCacheView(crop_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1) #endif for (y=0; y < (ssize_t) crop_image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register IndexPacket *restrict crop_indexes; register PixelPacket *restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns, 1,exception); q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); crop_indexes=GetCacheViewAuthenticIndexQueue(crop_view); (void) CopyMagickMemory(q,p,(size_t) crop_image->columns*sizeof(*p)); if ((indexes != (IndexPacket *) NULL) && (crop_indexes != (IndexPacket *) NULL)) (void) CopyMagickMemory(crop_indexes,indexes,(size_t) crop_image->columns* sizeof(*crop_indexes)); if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CropImage) #endif proceed=SetImageProgress(image,CropImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } crop_view=DestroyCacheView(crop_view); image_view=DestroyCacheView(image_view); crop_image->type=image->type; if (status == MagickFalse) crop_image=DestroyImage(crop_image); return(crop_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C r o p I m a g e T o T i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CropImageToTiles() crops a single image, into a possible 
list of tiles.
%  This may include a single sub-region of the image.  This basically applies
%  all the normal geometry flags for Crop.
%
%      Image *CropImageToTiles(const Image *image,
%         const char *crop_geometry, ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image The transformed image is returned as this parameter.
%
%    o crop_geometry: A crop geometry string (e.g. "100x100", "4x3@", or
%      "100x100+10+10"); parsed with ParseGravityGeometry().
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Round the fraction to the nearest integer (half rounds away from zero).
*/
static inline ssize_t MagickRound(MagickRealType x)
{
  /*
    Round the fraction to nearest integer.
  */
  if (x >= 0.0)
    return((ssize_t) (x+0.5));
  return((ssize_t) (x-0.5));
}

MagickExport Image *CropImageToTiles(const Image *image,
  const char *crop_geometry, ExceptionInfo *exception)
{
  Image
    *next,
    *crop_image;

  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  crop_image=NewImageList();
  next=NewImageList();
  flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
  if ((flags & AreaValue) != 0)
    {
      PointInfo
        delta,
        offset;

      RectangleInfo
        crop;

      size_t
        height,
        width;

      /*
        Crop into NxM tiles (@ flag).
      */
      width=image->columns;
      height=image->rows;
      /* avoid division by zero below when computing tile deltas */
      if (geometry.width == 0)
        geometry.width=1;
      if (geometry.height == 0)
        geometry.height=1;
      if ((flags & AspectValue) == 0)
        {
          width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      else
        {
          width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      /* fractional tile size; the >>1 term rounds to the nearest pixel */
      delta.x=(double) (width+(geometry.width >> 1))/geometry.width;
      delta.y=(double) (height+(geometry.height >> 1))/geometry.height;
      for (offset.y=0; offset.y < (double) height; )
      {
        if ((flags & AspectValue) == 0)
          {
            crop.y=(ssize_t) MagickRound((MagickRealType) (offset.y-
              (geometry.y > 0 ? 0 : geometry.y)));
            offset.y+=delta.y;   /* increment now to find width */
            crop.height=(size_t) MagickRound((MagickRealType) (offset.y+
              (geometry.y < 0 ? 0 : geometry.y)));
          }
        else
          {
            crop.y=(ssize_t) MagickRound((MagickRealType) (offset.y-
              (geometry.y > 0 ? geometry.y : 0)));
            offset.y+=delta.y;   /* increment now to find width */
            crop.height=(size_t) MagickRound((MagickRealType) (offset.y+
              (geometry.y < 0 ? geometry.y : 0)));
          }
        crop.height-=crop.y;
        crop.y+=image->page.y;
        for (offset.x=0; offset.x < (double) width; )
        {
          if ((flags & AspectValue) == 0)
            {
              crop.x=(ssize_t) MagickRound((MagickRealType) (offset.x-
                (geometry.x > 0 ? 0 : geometry.x)));
              offset.x+=delta.x;  /* increment now to find height */
              crop.width=(size_t) MagickRound((MagickRealType) (offset.x+
                (geometry.x < 0 ? 0 : geometry.x)));
            }
          else
            {
              crop.x=(ssize_t) MagickRound((MagickRealType) (offset.x-
                (geometry.x > 0 ? geometry.x : 0)));
              offset.x+=delta.x;  /* increment now to find height */
              crop.width=(size_t) MagickRound((MagickRealType) (offset.x+
                (geometry.x < 0 ? geometry.x : 0)));
            }
          crop.width-=crop.x;
          crop.x+=image->page.x;
          next=CropImage(image,&crop,exception);
          if (next == (Image *) NULL)
            break;
          AppendImageToList(&crop_image,next);
        }
        if (next == (Image *) NULL)
          break;
      }
      ClearMagickException(exception);
      return(crop_image);
    }
  if (((geometry.width == 0) && (geometry.height == 0)) ||
      ((flags & XValue) != 0) || ((flags & YValue) != 0))
    {
      /*
        Crop a single region at +X+Y.
      */
      crop_image=CropImage(image,&geometry,exception);
      if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
        {
          crop_image->page.width=geometry.width;
          crop_image->page.height=geometry.height;
          crop_image->page.x-=geometry.x;
          crop_image->page.y-=geometry.y;
        }
      return(crop_image);
    }
  if ((image->columns > geometry.width) || (image->rows > geometry.height))
    {
      RectangleInfo
        page;

      size_t
        height,
        width;

      ssize_t
        x,
        y;

      /*
        Crop into tiles of fixed size WxH.
      */
      page=image->page;
      if (page.width == 0)
        page.width=image->columns;
      if (page.height == 0)
        page.height=image->rows;
      width=geometry.width;
      if (width == 0)
        width=page.width;
      height=geometry.height;
      if (height == 0)
        height=page.height;
      next=NewImageList();
      for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
      {
        for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
        {
          geometry.width=width;
          geometry.height=height;
          geometry.x=x;
          geometry.y=y;
          next=CropImage(image,&geometry,exception);
          if (next == (Image *) NULL)
            break;
          AppendImageToList(&crop_image,next);
        }
        if (next == (Image *) NULL)
          break;
      }
      return(crop_image);
    }
  /* geometry covers the whole image: return an unmodified copy */
  return(CloneImage(image,0,0,MagickTrue,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   E x c e r p t I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExcerptImage() returns a excerpt of the image as defined by the geometry.
%
%  The format of the ExcerptImage method is:
%
%      Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to extend with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExcerptImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag  "Excerpt/Image"

  CacheView
    *excerpt_view,
    *image_view;

  Image
    *excerpt_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate excerpt image.
*/
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (excerpt_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Excerpt each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  excerpt_view=AcquireCacheView(excerpt_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) excerpt_image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict excerpt_indexes,
      *restrict indexes;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y,
      geometry->width,1,exception);
    q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) CopyMagickMemory(q,p,(size_t) excerpt_image->columns*sizeof(*q));
    /*
      NOTE(review): the source view is only read here; this uses
      GetCacheViewAuthenticIndexQueue() rather than the virtual variant used
      by e.g. FlipImage() below -- confirm this is intentional.
    */
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if (indexes != (IndexPacket *) NULL)
      {
        excerpt_indexes=GetCacheViewAuthenticIndexQueue(excerpt_view);
        if (excerpt_indexes != (IndexPacket *) NULL)
          (void) CopyMagickMemory(excerpt_indexes,indexes,(size_t)
            excerpt_image->columns*sizeof(*excerpt_indexes));
      }
    if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ExcerptImage)
#endif
        proceed=SetImageProgress(image,ExcerptImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  excerpt_view=DestroyCacheView(excerpt_view);
  image_view=DestroyCacheView(image_view);
  excerpt_image->type=image->type;
  if (status == MagickFalse)
    excerpt_image=DestroyImage(excerpt_image);
  return(excerpt_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   E x t e n t I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExtentImage() extends the image as defined by the geometry, gravity, and
%  image background color.  Set the (x,y) offset of the geometry to move the
%  original image relative to the extended image.
%
%  The format of the ExtentImage method is:
%
%      Image *ExtentImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to extend with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExtentImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
  Image
    *extent_image;

  /*
    Allocate extent image.
*/
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (extent_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(extent_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&extent_image->exception);
      extent_image=DestroyImage(extent_image);
      return((Image *) NULL);
    }
  /* a translucent background implies the result needs an alpha channel */
  if (extent_image->background_color.opacity != OpaqueOpacity)
    extent_image->matte=MagickTrue;
  (void) SetImageBackgroundColor(extent_image);
  /*
    Negated offsets position the original image within the extended canvas.
  */
  (void) CompositeImage(extent_image,image->compose,image,-geometry->x,
    -geometry->y);
  return(extent_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   F l i p I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FlipImage() creates a vertical mirror image by reflecting the pixels
%  around the central x-axis.
%
%  The format of the FlipImage method is:
%
%      Image *FlipImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag  "Flip/Image"

  CacheView
    *flip_view,
    *image_view;

  Image
    *flip_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  flip_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (flip_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flip image: source row y is written to destination row (rows-y-1).
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireCacheView(image);
  flip_view=AcquireCacheView(flip_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
  for (y=0; y < (ssize_t) flip_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict flip_indexes;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
      1),flip_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) CopyMagickMemory(q,p,(size_t) image->columns*sizeof(*q));
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    if (indexes != (const IndexPacket *) NULL)
      {
        flip_indexes=GetCacheViewAuthenticIndexQueue(flip_view);
        if (flip_indexes != (IndexPacket *) NULL)
          (void) CopyMagickMemory(flip_indexes,indexes,(size_t) image->columns*
            sizeof(*flip_indexes));
      }
    if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_FlipImage)
#endif
        proceed=SetImageProgress(image,FlipImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flip_view=DestroyCacheView(flip_view);
  image_view=DestroyCacheView(image_view);
  flip_image->type=image->type;
  /* mirror the page offset within the virtual canvas as well */
  if (page.height != 0)
    page.y=(ssize_t) (page.height-flip_image->rows-page.y);
  flip_image->page=page;
  if (status == MagickFalse)
    flip_image=DestroyImage(flip_image);
  return(flip_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   F l o p I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FlopImage() creates a horizontal mirror image by reflecting the pixels
%  around the central y-axis.
%
%  The format of the FlopImage method is:
%
%      Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag  "Flop/Image"

  CacheView
    *flop_view,
    *image_view;

  Image
    *flop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  flop_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (flop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flop each row.
*/
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireCacheView(image);
  flop_view=AcquireCacheView(flop_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
  for (y=0; y < (ssize_t) flop_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict flop_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
      exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* q starts one past the end of the row; pixels are written backwards */
    q+=flop_image->columns;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    flop_indexes=GetCacheViewAuthenticIndexQueue(flop_view);
    for (x=0; x < (ssize_t) flop_image->columns; x++)
    {
      (*--q)=(*p++);
      if ((indexes != (const IndexPacket *) NULL) &&
          (flop_indexes != (IndexPacket *) NULL))
        SetPixelIndex(flop_indexes+flop_image->columns-x-1,
          GetPixelIndex(indexes+x));
    }
    if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_FlopImage)
#endif
        proceed=SetImageProgress(image,FlopImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flop_view=DestroyCacheView(flop_view);
  image_view=DestroyCacheView(image_view);
  flop_image->type=image->type;
  /* mirror the page offset within the virtual canvas as well */
  if (page.width != 0)
    page.x=(ssize_t) (page.width-flop_image->columns-page.x);
  flop_image->page=page;
  if (status == MagickFalse)
    flop_image=DestroyImage(flop_image);
  return(flop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R o l l I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RollImage() offsets an image as defined by x_offset and y_offset.
%
%  The format of the RollImage method is:
%
%      Image *RollImage(const Image *image,const ssize_t x_offset,
%        const ssize_t y_offset,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x_offset: the number of columns to roll in the horizontal direction.
%
%    o y_offset: the number of rows to roll in the vertical direction.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Copy a columns x rows region of source at (sx,sy) to destination at (dx,dy).
  Returns MagickFalse if any scanline could not be read or written.
*/
static inline MagickBooleanType CopyImageRegion(Image *destination,
  const Image *source,const size_t columns,const size_t rows,
  const ssize_t sx,const ssize_t sy,const ssize_t dx,const ssize_t dy,
  ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *destination_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  status=MagickTrue;
  source_view=AcquireCacheView(source);
  destination_view=AcquireCacheView(destination);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict destination_indexes;

    register PixelPacket
      *restrict q;

    /*
      Transfer scanline.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
    q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source_view);
    (void) CopyMagickMemory(q,p,(size_t) columns*sizeof(*p));
    if (indexes != (IndexPacket *) NULL)
      {
        destination_indexes=GetCacheViewAuthenticIndexQueue(destination_view);
        if (destination_indexes != (IndexPacket *) NULL)
          (void) CopyMagickMemory(destination_indexes,indexes,(size_t)
            columns*sizeof(*indexes));
      }
    sync=SyncCacheViewAuthenticPixels(destination_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}

MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag  "Roll/Image"

  Image
    *roll_image;

  MagickStatusType
    status;

  RectangleInfo
    offset;

  /*
    Initialize roll image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  roll_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (roll_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Normalize the offsets into [0,columns) and [0,rows).
  */
  offset.x=x_offset;
  offset.y=y_offset;
  while (offset.x < 0)
    offset.x+=(ssize_t) image->columns;
  while (offset.x >= (ssize_t) image->columns)
    offset.x-=(ssize_t) image->columns;
  while (offset.y < 0)
    offset.y+=(ssize_t) image->rows;
  while (offset.y >= (ssize_t) image->rows)
    offset.y-=(ssize_t) image->rows;
  /*
    Roll image.
*/
  /*
    Copy the four wrap-around quadrants of the source into their rolled
    positions in the destination.
  */
  status=CopyImageRegion(roll_image,image,(size_t) offset.x,
    (size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows-
    offset.y,0,0,exception);
  (void) SetImageProgress(image,RollImageTag,0,3);
  status|=CopyImageRegion(roll_image,image,image->columns-offset.x,
    (size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0,
    exception);
  (void) SetImageProgress(image,RollImageTag,1,3);
  status|=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows-
    offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,2,3);
  status|=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows-
    offset.y,0,0,offset.x,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,3,3);
  roll_image->type=image->type;
  if (status == MagickFalse)
    roll_image=DestroyImage(roll_image);
  return(roll_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S h a v e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShaveImage() shaves pixels from the image edges.  It allocates the memory
%  necessary for the new Image structure and returns a pointer to the new
%  image.
%
%  The format of the ShaveImage method is:
%
%      Image *ShaveImage(const Image *image,const RectangleInfo *shave_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o shave_image: Method ShaveImage returns a pointer to the shaved
%      image.  A null image is returned if there is a memory shortage or
%      if the image width or height is zero.
%
%    o image: the image.
%
%    o shave_info: Specifies a pointer to a RectangleInfo which defines the
%      region of the image to crop.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShaveImage(const Image *image,
  const RectangleInfo *shave_info,ExceptionInfo *exception)
{
  Image
    *shave_image;

  RectangleInfo
    geometry;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* shaving both edges must leave at least one pixel in each dimension */
  if (((2*shave_info->width) >= image->columns) ||
      ((2*shave_info->height) >= image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  SetGeometry(image,&geometry);
  geometry.width-=2*shave_info->width;
  geometry.height-=2*shave_info->height;
  geometry.x=(ssize_t) shave_info->width+image->page.x;
  geometry.y=(ssize_t) shave_info->height+image->page.y;
  shave_image=CropImage(image,&geometry,exception);
  if (shave_image == (Image *) NULL)
    return((Image *) NULL);
  /* shrink the virtual canvas to match the shaved region */
  shave_image->page.width-=2*shave_info->width;
  shave_image->page.height-=2*shave_info->height;
  shave_image->page.x-=(ssize_t) shave_info->width;
  shave_image->page.y-=(ssize_t) shave_info->height;
  return(shave_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S p l i c e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SpliceImage() splices a solid color into the image as defined by the
%  geometry.
%
%  The format of the SpliceImage method is:
%
%      Image *SpliceImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to splice with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *SpliceImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { #define SpliceImageTag "Splice/Image" CacheView *image_view, *splice_view; Image *splice_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo splice_geometry; ssize_t y; /* Allocate splice image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); splice_geometry=(*geometry); splice_image=CloneImage(image,image->columns+splice_geometry.width, image->rows+splice_geometry.height,MagickTrue,exception); if (splice_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(splice_image,DirectClass) == MagickFalse) { InheritException(exception,&splice_image->exception); splice_image=DestroyImage(splice_image); return((Image *) NULL); } (void) SetImageBackgroundColor(splice_image); /* Respect image geometry. 
*/ switch (image->gravity) { default: case UndefinedGravity: case NorthWestGravity: break; case NorthGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; break; } case NorthEastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; break; } case WestGravity: { splice_geometry.y+=(ssize_t) splice_geometry.width/2; break; } case StaticGravity: case CenterGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; splice_geometry.y+=(ssize_t) splice_geometry.height/2; break; } case EastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; splice_geometry.y+=(ssize_t) splice_geometry.height/2; break; } case SouthWestGravity: { splice_geometry.y+=(ssize_t) splice_geometry.height; break; } case SouthGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; splice_geometry.y+=(ssize_t) splice_geometry.height; break; } case SouthEastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; splice_geometry.y+=(ssize_t) splice_geometry.height; break; } } /* Splice image. 
*/ status=MagickTrue; progress=0; image_view=AcquireCacheView(image); splice_view=AcquireCacheView(splice_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) splice_geometry.y; y++) { register const PixelPacket *restrict p; register IndexPacket *restrict indexes, *restrict splice_indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view); for (x=0; x < splice_geometry.x; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if (image->colorspace == CMYKColorspace) SetPixelIndex(splice_indexes+x, GetPixelIndex(indexes)); indexes++; p++; q++; } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q++; for ( ; x < (ssize_t) splice_image->columns; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if (image->colorspace == CMYKColorspace) SetPixelIndex(splice_indexes+x, GetPixelIndex(indexes)); indexes++; p++; q++; } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransposeImage) #endif 
proceed=SetImageProgress(image,SpliceImageTag,progress++, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=(ssize_t) (splice_geometry.y+splice_geometry.height); y < (ssize_t) splice_image->rows; y++) { register const PixelPacket *restrict p; register IndexPacket *restrict indexes, *restrict splice_indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height, image->columns,1,exception); if ((y < 0) || (y >= (ssize_t) splice_image->rows)) continue; q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view); for (x=0; x < splice_geometry.x; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if (image->colorspace == CMYKColorspace) SetPixelIndex(splice_indexes+x, GetPixelIndex(indexes)); indexes++; p++; q++; } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q++; for ( ; x < (ssize_t) splice_image->columns; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if (image->colorspace == CMYKColorspace) SetPixelIndex(splice_indexes+x, GetPixelIndex(indexes)); indexes++; p++; q++; } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { 
MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransposeImage) #endif proceed=SetImageProgress(image,SpliceImageTag,progress++, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } splice_view=DestroyCacheView(splice_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) splice_image=DestroyImage(splice_image); return(splice_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformImage() is a convenience method that behaves like ResizeImage() or % CropImage() but accepts scaling and/or cropping information as a region % geometry specification. If the operation fails, the original image handle % is left as is. % % This should only be used for single images. % % The format of the TransformImage method is: % % MagickBooleanType TransformImage(Image **image,const char *crop_geometry, % const char *image_geometry) % % A description of each parameter follows: % % o image: the image The transformed image is returned as this parameter. % % o crop_geometry: A crop geometry string. This geometry defines a % subregion of the image to crop. % % o image_geometry: An image geometry string. This geometry defines the % final size of the image. % */ /* DANGER: This function destroys what it assumes to be a single image list. If the input image is part of a larger list, all other images in that list will be simply 'lost', not destroyed. Also if the crop generates a list of images only the first image is resized. And finally if the crop succeeds and the resize failed, you will get a cropped image, as well as a 'false' or 'failed' report. This function and should probably be depreciated in favor of direct calls to CropImageToTiles() or ResizeImage(), as appropriate. 
*/
MagickExport MagickBooleanType TransformImage(Image **image,
  const char *crop_geometry,const char *image_geometry)
{
  Image
    *resize_image,
    *transform_image;

  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image **) NULL);
  assert((*image)->signature == MagickSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  transform_image=(*image);
  if (crop_geometry != (const char *) NULL)
    {
      Image
        *crop_image;

      /*
        Crop image to a user specified size.
      */
      crop_image=CropImageToTiles(*image,crop_geometry,&(*image)->exception);
      if (crop_image == (Image *) NULL)
        transform_image=CloneImage(*image,0,0,MagickTrue,&(*image)->exception);
      else
        {
          /* replace the input image with the first crop result (see DANGER
             note above: any further tiles' siblings are kept via the list) */
          transform_image=DestroyImage(transform_image);
          transform_image=GetFirstImageInList(crop_image);
        }
      *image=transform_image;
    }
  if (image_geometry == (const char *) NULL)
    return(MagickTrue);
  /*
    Scale image to a user specified size.
  */
  flags=ParseRegionGeometry(transform_image,image_geometry,&geometry,
    &(*image)->exception);
  (void) flags;
  if ((transform_image->columns == geometry.width) &&
      (transform_image->rows == geometry.height))
    return(MagickTrue);
  resize_image=ResizeImage(transform_image,geometry.width,geometry.height,
    transform_image->filter,transform_image->blur,&(*image)->exception);
  if (resize_image == (Image *) NULL)
    return(MagickFalse);
  transform_image=DestroyImage(transform_image);
  transform_image=resize_image;
  *image=transform_image;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f o r m I m a g e s                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformImages() calls TransformImage() on each image of a sequence.
%
%  The format of the TransformImage method is:
%
%      MagickBooleanType TransformImages(Image **image,
%        const char *crop_geometry,const char *image_geometry)
%
%  A description of each parameter follows:
%
%    o image: the image The transformed image is returned as this parameter.
%
%    o crop_geometry: A crop geometry string.  This geometry defines a
%      subregion of the image to crop.
%
%    o image_geometry: An image geometry string.  This geometry defines the
%      final size of the image.
%
*/
MagickExport MagickBooleanType TransformImages(Image **images,
  const char *crop_geometry,const char *image_geometry)
{
  Image
    *image,
    **image_list,
    *transform_images;

  MagickStatusType
    status;

  register ssize_t
    i;

  assert(images != (Image **) NULL);
  assert((*images)->signature == MagickSignature);
  if ((*images)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      (*images)->filename);
  image_list=ImageListToArray(*images,&(*images)->exception);
  if (image_list == (Image **) NULL)
    return(MagickFalse);
  status=MagickTrue;
  transform_images=NewImageList();
  for (i=0; image_list[i] != (Image *) NULL; i++)
  {
    image=image_list[i];
    /* accumulate per-image status; any nonzero bit means success so far */
    status|=TransformImage(&image,crop_geometry,image_geometry);
    AppendImageToList(&transform_images,image);
  }
  *images=transform_images;
  image_list=(Image **) RelinquishMagickMemory(image_list);
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s p o s e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransposeImage() creates a horizontal mirror image by reflecting the pixels
%  around the central y-axis while rotating them by 90 degrees.
%
%  The format of the TransposeImage method is:
%
%      Image *TransposeImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception)
{
#define TransposeImageTag "Transpose/Image"

  CacheView
    *image_view,
    *transpose_view;

  Image
    *transpose_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    The transposed image has rows and columns swapped relative to the source.
  */
  transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transpose_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transpose image: each source row becomes a destination column.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  transpose_view=AcquireCacheView(transpose_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict transpose_indexes,
      *restrict indexes;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    /*
      Read source row (rows-y-1); write it as destination column (rows-y-1).
      The destination region is 1 pixel wide and transpose_image->rows tall,
      so the straight memory copy below lays the row out as a column.
    */
    p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1),
      0,1,transpose_image->rows,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) CopyMagickMemory(q,p,(size_t) image->columns*sizeof(*q));
    /*
      Copy the colormap/alpha index channel alongside the pixels, if present.
      NOTE(review): the source pixels were fetched with
      GetCacheViewVirtualPixels(), yet the *Authentic* index queue is read
      here -- confirm this should not be GetCacheViewVirtualIndexQueue().
    */
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if (indexes != (IndexPacket *) NULL)
      {
        transpose_indexes=GetCacheViewAuthenticIndexQueue(transpose_view);
        if (transpose_indexes != (IndexPacket *) NULL)
          (void) CopyMagickMemory(transpose_indexes,indexes,(size_t)
            image->columns*sizeof(*transpose_indexes));
      }
    if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Progress counter is shared across OpenMP threads; serialize it. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_TransposeImage)
#endif
        proceed=SetImageProgress(image,TransposeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transpose_view=DestroyCacheView(transpose_view);
  image_view=DestroyCacheView(image_view);
  transpose_image->type=image->type;
  /*
    Swap the page geometry to match the rotated canvas.
  */
  page=transpose_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  transpose_image->page=page;
  if (status == MagickFalse)
    transpose_image=DestroyImage(transpose_image);
  return(transpose_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s v e r s e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransverseImage() creates a vertical mirror image by reflecting the pixels
%  around the central x-axis while rotating them by 270 degrees.
%
%  The format of the TransverseImage method is:
%
%      Image *TransverseImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag "Transverse/Image"

  CacheView
    *image_view,
    *transverse_view;

  Image
    *transverse_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    The transversed image has rows and columns swapped relative to the source.
  */
  transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transverse_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transverse image: each source row is reversed and written as a
    destination column.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  transverse_view=AcquireCacheView(transverse_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict transverse_indexes,
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    /*
      Read source row y; write it (reversed) into destination column
      (rows-y-1), a region 1 pixel wide and transverse_image->rows tall.
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-
      1),0,1,transverse_image->rows,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Walk q backwards from the end of the region to reverse the pixel order.
    */
    q+=image->columns;
    for (x=0; x < (ssize_t) image->columns; x++)
      *--q=(*p++);
    /*
      Reverse the colormap/alpha index channel as well, if present.
      NOTE(review): source pixels were fetched with
      GetCacheViewVirtualPixels(), yet the *Authentic* index queue is read
      here -- confirm this should not be GetCacheViewVirtualIndexQueue().
    */
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if (indexes != (IndexPacket *) NULL)
      {
        transverse_indexes=GetCacheViewAuthenticIndexQueue(transverse_view);
        if (transverse_indexes != (IndexPacket *) NULL)
          for (x=0; x < (ssize_t) image->columns; x++)
            SetPixelIndex(transverse_indexes+image->columns-x-1,
              GetPixelIndex(indexes+x));
      }
    sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Progress counter is shared across OpenMP threads; serialize it. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_TransverseImage)
#endif
        proceed=SetImageProgress(image,TransverseImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transverse_view=DestroyCacheView(transverse_view);
  image_view=DestroyCacheView(image_view);
  transverse_image->type=image->type;
  /*
    Swap the page geometry and re-anchor the offsets from the opposite
    corner to match the 270-degree rotation.
  */
  page=transverse_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  if (page.width != 0)
    page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
  if (page.height != 0)
    page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
  transverse_image->page=page;
  if (status == MagickFalse)
    transverse_image=DestroyImage(transverse_image);
  return(transverse_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r i m I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TrimImage() trims pixels from the image edges.  It allocates the memory
%  necessary for the new Image structure and returns a pointer to the new
%  image.
%
%  The format of the TrimImage method is:
%
%      Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
  RectangleInfo
    bounds;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  bounds=GetImageBoundingBox(image,exception);
  if ((bounds.width != 0) && (bounds.height != 0))
    {
      /*
        Crop to the detected bounding box, translated into page coordinates.
      */
      bounds.x+=image->page.x;
      bounds.y+=image->page.y;
      return(CropImage(image,&bounds,exception));
    }
  {
    Image
      *blank_image;

    /*
      Nothing left after trimming: return a 1x1 transparent placeholder
      carrying the original page geometry (offsets reset to -1).
    */
    blank_image=CloneImage(image,1,1,MagickTrue,exception);
    if (blank_image == (Image *) NULL)
      return((Image *) NULL);
    blank_image->background_color.opacity=(Quantum) TransparentOpacity;
    (void) SetImageBackgroundColor(blank_image);
    blank_image->page=image->page;
    blank_image->page.x=(-1);
    blank_image->page.y=(-1);
    return(blank_image);
  }
}